kitchen_sink

This program has been disqualified.


Author: muddyshoes
Submission date: 2011-06-12 07:36:48.120892
Rating: 8001
Matches played: 172
Win rate: 79.65

Source code:

import random
import math

class TurnHistory:
    """Shared record of every completed turn as (my_move, opp_move, result)."""

    # Result of playing the first move against the second:
    # 1 = first wins, -1 = first loses, 0 = draw.
    outcomes = { "RR":0,"RP":-1, "RS":1, "PR":1, "PP":0, "PS":-1, "SR":-1,"SP":1,"SS":0}

    def __init__(self):
        self.turns = []

    def win_lose_or_draw(self, p1, p2):
        """Return 1, 0 or -1 for p1 beating, drawing with or losing to p2."""
        return TurnHistory.outcomes[p1 + p2]

    def append_turn(self, me, opp):
        """Record one finished turn together with its outcome for us."""
        self.turns.append((me, opp, self.win_lose_or_draw(me, opp)))


class Predictor(object):
    """Base class for opponent-move predictors.

    Each predictor records its own predictions and scores itself on how the
    match would have gone had its predictions actually been countered.
    """

    # Recency weights for the most recent prediction results
    # (oldest first; the latest result is weighted 1.0).
    scoring_ratios = [0.1,0.15,0.23,0.33,0.45,0.59,0.72,0.85,1.0]
    # Move that beats the key move.
    winning_moves = {'R':'P','P':'S','S':'R'}

    def __init__(self, turn_history):
        self.prediction_history = []
        self.prediction_score_history = []
        self.turn_history = turn_history

    def get_total_score(self):
        """Sum of all per-turn what-if results (1 win, 0 draw, -1 loss)."""
        return sum(self.prediction_score_history)

    def get_prediction_score(self):
        """Recency-weighted score over the last few predictions."""
        window = min(len(Predictor.scoring_ratios), len(self.prediction_score_history))
        if window == 0:
            return 0
        recent = self.prediction_score_history[-window:]
        weights = Predictor.scoring_ratios[-window:]
        return sum(s * w for s, w in zip(recent, weights))

    def turn_added(self):
        """Score the previous prediction, update the model, predict again."""
        if self.prediction_history:
            # What would have happened had we countered our last prediction?
            played = Predictor.winning_moves[self.prediction_history[-1]]
            result = self.turn_history.win_lose_or_draw(played, self.turn_history.turns[-1][1])
            self.prediction_score_history.append(result)
        self.inner_turn_added()
        self.prediction_history.append(self.predict_next_opp_move())

    def inner_turn_added(self):
        """Hook: subclasses update their internal model after each turn."""
        pass

    def predict_next_opp_move(self):
        """Hook: subclasses return their predicted opponent move."""
        pass

    def choose_best(self, scores):
        """Return the move with the highest count; scores is (count, move) pairs."""
        counts = [count for count, _ in scores]
        return scores[counts.index(max(counts))][1]

    def choose_dist(self, scores):
        """Pick a move at random weighted by count; scores is (move, count) pairs."""
        total = sum(count for _, count in scores)
        if total == 0:
            return random.choice(['R','P','S'])
        pick = (random.randint(1,1000)/1000.0)*total
        if scores[0][1] > pick:
            return scores[0][0]
        elif scores[0][1] + scores[1][1] > pick:
            return scores[1][0]
        return scores[2][0]

class IOPairsPredictor(Predictor):
    """Predicts the opponent's move from the previous (my_move, opp_move) pair.

    Keeps exponentially decayed counts of which opponent move followed each
    observed pair.
    """

    def __init__(self, turn_history):
        Predictor.__init__(self, turn_history)
        self.history = {}

    def inner_turn_added(self):
        # Age every stored count so that recent behaviour dominates.
        for counts in self.history.values():
            for move in counts.keys():
                counts[move] *= 0.95

        turns = self.turn_history.turns
        if len(turns) > 1:
            key = turns[-2][0] + turns[-2][1]
            counts = self.history.setdefault(key, {'R':0,'P':0,'S':0})
            counts[turns[-1][1]] += 1

    def predict_next_opp_move(self):
        turns = self.turn_history.turns
        key = turns[-1][0] + turns[-1][1]
        if key in self.history:
            return self.choose_dist(self.history[key].items())
        return random.choice(['R','P','S'])

class AntiIOPairsPredictor(IOPairsPredictor):
    """Second-guesses the pair predictor: assumes the opponent will play the
    move that beats the pair-based prediction."""

    def predict_next_opp_move(self):
        base_prediction = IOPairsPredictor.predict_next_opp_move(self)
        return Predictor.winning_moves[base_prediction]

class LastInputPredictor(Predictor):
    """Predicts the opponent's next move from their previous move alone,
    using exponentially decayed follow-up counts."""

    def __init__(self, turn_history):
        Predictor.__init__(self, turn_history)
        self.history = {}

    def inner_turn_added(self):
        # Decay all counts so older observations fade out.
        for counts in self.history.values():
            for move in ('R', 'P', 'S'):
                counts[move] *= 0.95

        turns = self.turn_history.turns
        if len(turns) > 1:
            prev_opp = turns[-2][1]
            if prev_opp not in self.history:
                self.history[prev_opp] = {'R':0,'P':0,'S':0}
            self.history[prev_opp][turns[-1][1]] += 1

    def predict_next_opp_move(self):
        last_opp = self.turn_history.turns[-1][1]
        if last_opp in self.history:
            return self.choose_dist(self.history[last_opp].items())
        return random.choice(['R','P','S'])

class AntiLastInputPredictor(LastInputPredictor):
    """Counter-predictor: assumes the opponent anticipates the last-input
    prediction and plays the move that beats it."""

    def predict_next_opp_move(self):
        predicted = LastInputPredictor.predict_next_opp_move(self)
        return Predictor.winning_moves[predicted]


class LastOutputPredictor(Predictor):
    """Predicts the opponent's next move from OUR previous move, using
    decayed counts of what the opponent played after each of our moves."""

    def __init__(self, turn_history):
        Predictor.__init__(self, turn_history)
        self.history = {}

    def inner_turn_added(self):
        # Decay existing counts first so recent behaviour dominates.
        for counts in self.history.values():
            for move in ('R', 'P', 'S'):
                counts[move] *= 0.95
        turns = self.turn_history.turns
        if len(turns) > 1:
            my_prev = turns[-2][0]
            if my_prev not in self.history:
                self.history[my_prev] = {'R':0,'P':0,'S':0}
            self.history[my_prev][turns[-1][1]] += 1

    def predict_next_opp_move(self):
        my_last = self.turn_history.turns[-1][0]
        if my_last in self.history:
            return self.choose_dist(self.history[my_last].items())
        return random.choice(['R','P','S'])

class RecentInputsPredictor(Predictor):
    """Predicts the opponent's next move with a naive Bayes classifier over
    the exact sequence of their last `recent_length` moves.

    NOTE: this predictor is currently commented out of the active ensemble.
    """

    # Window size: number of recent opponent moves forming the feature vector.
    recent_length = 5

    def __init__(self, turn_history):
        Predictor.__init__(self, turn_history)
        self.history = []   # training feature vectors (lists of move indices)
        self.classes = []   # follow-up opponent move observed for each vector
        self.nb = None      # trained NaiveBayes model, None until enough data

    def inner_turn_added(self):
        # BUG FIX: previously read recent_length from
        # RecentInputDistributionPredictor (a different class), silently
        # ignoring this class's own attribute.  The values coincided (both 5),
        # so behaviour is unchanged today, but the latent mismatch is removed.
        mapping = {'R':0,'P':1,'S':2}
        window = RecentInputsPredictor.recent_length
        if len(self.turn_history.turns) > window:
            # Features: the `window` opponent moves preceding the latest turn.
            recent = [ mapping[x[1]] for x in self.turn_history.turns[-(window+1):-1] ]
            self.history.append(recent)
            self.classes.append(self.turn_history.turns[-1][1])
            self.nb = NaiveBayes.train(self.history, self.classes, typecode='i')

    def predict_next_opp_move(self):
        if self.nb:
            mapping = {'R':0,'P':1,'S':2}
            recent = [ mapping[x[1]] for x in self.turn_history.turns[-RecentInputsPredictor.recent_length:] ]
            return self.nb.classify(recent)
        return random.choice(['R','P','S'])

class RecentInputDistributionPredictor(Predictor):
    """Predicts the opponent's next move with a naive Bayes classifier over
    the R/P/S counts within their last `recent_length` moves."""

    # Window size: number of recent opponent moves summarised per sample.
    recent_length = 5

    def __init__(self, turn_history):
        Predictor.__init__(self, turn_history)
        self.history = []   # feature vectors: [count of R, count of P, count of S]
        self.classes = []   # follow-up opponent move observed for each window
        self.nb = None      # trained NaiveBayes model, None until enough data

    def inner_turn_added(self):
        window = RecentInputDistributionPredictor.recent_length
        if len(self.turn_history.turns) > window:
            recent = [ x[1] for x in self.turn_history.turns[-(window+1):-1] ]
            self.history.append([recent.count('R'), recent.count('P'), recent.count('S')])
            self.classes.append(self.turn_history.turns[-1][1])
            self.nb = NaiveBayes.train(self.history, self.classes, typecode='i')

    def predict_next_opp_move(self):
        if self.nb:
            window = RecentInputDistributionPredictor.recent_length
            recent = [ x[1] for x in self.turn_history.turns[-window:] ]
            return self.nb.classify([recent.count('R'), recent.count('P'), recent.count('S')])
        return random.choice(['R','P','S'])
class SimplePatternPredictor(Predictor):
    """Predicts from (my_move, opp_move) pair patterns of length 1, 2 and 3.

    Each table maps a recent pattern key to decayed counts of the opponent
    move that followed it; shorter patterns decay faster so they adapt
    quickly, longer patterns decay slowly so they accumulate evidence.
    """

    # Per-turn decay factors applied for each turn since a pattern was last seen.
    SINGLE_DECAY = 0.7
    DOUBLE_DECAY = 0.95
    TRIPLE_DECAY = 0.99

    # Index of each move within a table entry's count slots.
    _MOVE_INDEX = {'R':0,'P':1,'S':2}

    def __init__(self, turn_history):
        Predictor.__init__(self, turn_history)
        self.singles = {}
        self.doubles = {}
        self.triples = {}
        self.nb = None

    def _record(self, table, key, decay, turn, followup):
        """Decay `key`'s entry by time since last seen, then count `followup`.

        Entries are [count_R, count_P, count_S, last_seen_turn].  This
        replaces three near-identical inline copies of the same logic.
        """
        if key not in table:
            table[key] = [0.0, 0.0, 0.0, turn - 1]
        else:
            entry = table[key]
            factor = pow(decay, turn - entry[3])
            entry[3] = turn
            entry[0] *= factor
            entry[1] *= factor
            entry[2] *= factor
        table[key][SimplePatternPredictor._MOVE_INDEX[followup]] += 1.0

    def inner_turn_added(self):
        turns = self.turn_history.turns
        turn = len(turns)
        if turn < 2:
            return
        followup = turns[-1][1]

        lastpair = (turns[-2][0], turns[-2][1])
        self._record(self.singles, lastpair, SimplePatternPredictor.SINGLE_DECAY, turn, followup)

        if turn > 2:
            prevdouble = tuple((x[0], x[1]) for x in turns[-3:-1])
            self._record(self.doubles, prevdouble, SimplePatternPredictor.DOUBLE_DECAY, turn, followup)

        if turn > 3:
            prevtriple = tuple((x[0], x[1]) for x in turns[-4:-1])
            self._record(self.triples, prevtriple, SimplePatternPredictor.TRIPLE_DECAY, turn, followup)

    def predict_next_opp_move(self):
        turns = self.turn_history.turns
        scores = [0.0, 0.0, 0.0]

        def accumulate(table, key):
            # Fold this pattern table's decayed counts into the combined scores.
            if key in table:
                entry = table[key]
                scores[0] += entry[0]
                scores[1] += entry[1]
                scores[2] += entry[2]

        accumulate(self.singles, (turns[-1][0], turns[-1][1]))
        accumulate(self.doubles, tuple((x[0], x[1]) for x in turns[-2:]))
        accumulate(self.triples, tuple((x[0], x[1]) for x in turns[-3:]))

        return self.choose_dist([('R', scores[0]), ('P', scores[1]), ('S', scores[2])])

class AntiSimplePatternPredictor(SimplePatternPredictor):
    """Plays one level deeper: assumes the opponent will counter the
    simple-pattern prediction, so predicts the move that beats it."""

    def predict_next_opp_move(self):
        base_prediction = SimplePatternPredictor.predict_next_opp_move(self)
        return Predictor.winning_moves[base_prediction]



#The code below is a modified version of part of biopython


#                 Biopython License Agreement
#
#Permission to use, copy, modify, and distribute this software and its
#documentation with or without modifications and for any purpose and
#without fee is hereby granted, provided that any copyright notices
#appear in all copies and that both those copyright notices and this
#permission notice appear in supporting documentation, and that the
#names of the contributors or copyright holders not be used in
#advertising or publicity pertaining to distribution of the software
#without specific prior permission.
#
#THE CONTRIBUTORS AND COPYRIGHT HOLDERS OF THIS SOFTWARE DISCLAIM ALL
#WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL THE
#CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT
#OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
#OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
#OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
#OR PERFORMANCE OF THIS SOFTWARE.

class NaiveBayes:
    """Holds information for a NaiveBayes classifier.

    Members:
    classes         List of the possible classes of data.
    p_conditional   CLASS x DIM array of dicts of value -> P(value|class,dim)
    p_prior         List of the prior probabilities for every class.
    dimensionality  Dimensionality of the data.

    """
    def __init__(self):
        self.classes = []
        self.p_conditional = None
        self.p_prior = []
        self.dimensionality = None

    def calculate(self, observation, scale=0):
        """calculate(nb, observation[, scale]) -> probability dict

        Calculate log P(class|observation) for each class.  observation is a
        list representing the observed data.  scale is whether the
        probability should be scaled by P(observation); by default no scaling
        is done.  Returns a dict mapping class -> log probability.

        Raises ValueError if observation has the wrong dimensionality.
        """
        # P(class|observation) = P(observation|class)*P(class)/P(observation)
        # Taking the log:
        # lP(class|observation) = lP(observation|class)+lP(class)-lP(observation)

        # BUG FIX: "raise ValueError, msg" is Python-2-only syntax; the call
        # form below is valid on both Python 2 and 3.
        if len(observation) != self.dimensionality:
            raise ValueError("observation in %d dimension, but classifier in %d"
                             % (len(observation), self.dimensionality))

        # Calculate log P(observation|class) for every class.
        lp_observation_class = []     # list of log P(observation|class)
        for i in range(len(self.classes)):
            # log P(observation|class) = SUM_i log P(observation_i|class)
            probs = [self.p_conditional[i][j].get(observation[j], 0)
                     for j in range(len(observation))]
            # Unseen values have probability 0; substitute a large negative
            # log-probability instead of raising a math domain error.
            lprobs = [math.log(x) if x > 0 else -10000 for x in probs]
            lp_observation_class.append(sum(lprobs))

        # Calculate log P(class).  BUG FIX: wrapped in list() because map()
        # returns a one-shot iterator on Python 3 but is indexed twice below.
        lp_prior = list(map(math.log, self.p_prior))

        # Calculate log P(observation).  Only computed when requested.
        lp_observation = 0.0
        if scale:
            # log P(observation) = log SUM_i P(observation|class_i)P(class_i)
            obs = [math.exp(lp_prior[i] + lp_observation_class[i])
                   for i in range(len(self.classes))]
            lp_observation = math.log(sum(obs))

        # Calculate log P(class|observation).
        lp_class_observation = {}      # Dict of class : log P(class|observation)
        for i in range(len(self.classes)):
            lp_class_observation[self.classes[i]] = \
                lp_observation_class[i] + lp_prior[i] - lp_observation

        return lp_class_observation

    def classify(self, observation):
        """classify(nb, observation) -> class

        Classify an observation into the class with the highest probability.

        """
        probs = self.calculate(observation, scale=0)
        max_prob = max_class = None
        for klass in self.classes:
            if max_prob is None or probs[klass] > max_prob:
                max_prob, max_class = probs[klass], klass
        return max_class

    @classmethod
    def train(cls, training_set, results, priors=None, typecode=None):
        """train(training_set, results[, priors]) -> NaiveBayes

        Train a naive bayes classifier on a training set.  training_set is a
        list of observations.  results is a list of the class assignments
        for each observation, so training_set and results must be the same
        length.  priors is an optional dict of prior probabilities per class;
        if not given, priors are estimated from results.  typecode is
        accepted for backward compatibility and ignored.

        Raises ValueError on an empty, mismatched or ragged training set.
        """
        # BUG FIX: Python-2-only "raise X, msg" statements replaced with the
        # call form, valid on both Python 2 and 3.
        if not len(training_set):
            raise ValueError("No data in the training set.")
        if len(training_set) != len(results):
            raise ValueError("training_set and results should be parallel lists.")

        # Check that every vector in the training set has the same
        # dimensionality.
        dimensions = [len(x) for x in training_set]
        if min(dimensions) != max(dimensions):
            raise ValueError("observations have different dimensionality")

        nb = cls()
        nb.dimensionality = dimensions[0]

        # All distinct classes, kept tidy in sorted order.
        nb.classes = sorted(set(results))

        # Estimate the prior probabilities for the classes.
        if priors is not None:
            percs = priors
        else:
            # BUG FIX: counts were divided by the NUMBER OF CLASSES instead
            # of the number of observations, so the priors did not sum to 1.
            total = float(len(results))
            percs = {}
            for c in nb.classes:
                percs[c] = results.count(c) / total
        nb.p_prior = [percs[c] for c in nb.classes]

        # Separate the observations by class: for each class, collect the
        # list of training vectors assigned to it.
        c2i = dict((c, nb.classes.index(c)) for c in nb.classes)
        observations = [[] for c in nb.classes]
        for i in range(len(results)):
            observations[c2i[results[i]]].append(training_set[i])

        # Calculate P(value|class,dim) for every class.
        nb.p_conditional = []
        for i in range(len(nb.classes)):
            class_observations = observations[i]   # observations for this class
            nb.p_conditional.append([None] * nb.dimensionality)
            for j in range(nb.dimensionality):
                # Collect all the values in this dimension.
                values = [x[j] for x in class_observations]

                # Estimate P(value|class,dim).  BUG FIX: counts were divided
                # by the number of DISTINCT values instead of the number of
                # observations, so "probabilities" could exceed 1 and varied
                # in scale across classes, skewing classification.
                d = {}
                n_obs = float(len(values))
                for v in set(values):
                    d[v] = values.count(v) / n_obs
                nb.p_conditional[i][j] = d
        return nb
#End of Biopython-derived code

# Move that beats the key move (used to counter the predicted opponent move).
winning_moves = {'R':'P','P':'S','S':'R'}

# NOTE(review): `input` and `output` appear to be globals supplied by the
# tournament framework -- `input` looks like the opponent's previous move
# ("" on the first turn) and `output` the move we emit -- and module state
# seems to persist between turns.  TODO confirm against the framework driver.
if input == "":
    # First turn: build the shared history and the ensemble of predictors.
    turn_history = TurnHistory()
    predictors = [ RecentInputDistributionPredictor(turn_history),
#RecentInputsPredictor(turn_history),
    LastInputPredictor(turn_history), LastOutputPredictor(turn_history),
    IOPairsPredictor(turn_history),
    SimplePatternPredictor(turn_history)]
#Something is broken with inheritance here
#,AntiSimplePatternPredictor(turn_history),
# AntiLastInputPredictor(turn_history),AntiIOPairsPredictor(turn_history)]

    # No history yet: open with a uniformly random move.
    output = random.choice(["R","P","S"])
else:
    # Record the completed turn, then let each predictor score its last
    # prediction, update its model and predict the next opponent move.
    turn_history.append_turn(output,input)

    for predictor in predictors:
        predictor.turn_added()

    # Rank predictors by lifetime record plus a heavily weighted recent score.
    scores = []
    for predictor in predictors:
        scores.append(predictor.get_total_score() + predictor.get_prediction_score()*1000)

    # Play the move that beats the best predictor's predicted opponent move.
    m = max(scores)
    move = predictors[scores.index(m)].predict_next_opp_move()
    output = winning_moves[move]