supermarkov

Author: jermenkoo
Submission date: 2015-11-14 23:02:02.677311
Rating: 4873
Matches played: 489
Win rate: 54.81%

Use rpsrunner.py to play unranked matches on your computer.

Source code:

import collections
import random
import re

MOVES = 2  # order of the Markov chain: condition on the opponent's last two moves


class MarkovChain:
    history = ""
    win = {"R": "P", "P": "S", "S": "R"}  # the move that beats each opponent move

    def learn(self, i):
        # Record the opponent's latest move.
        self.history += i

    def decide(self):
        if len(self.history) < MOVES:
            # Not enough history yet: counter a uniformly random guess.
            return self.win[random.choice(["R", "P", "S"])]
        # Find every earlier occurrence of the opponent's last MOVES moves...
        pattern = self.history[-MOVES:]
        idx = [m.start() for m in re.finditer(pattern, self.history)]
        # ...and collect the move that followed each occurrence.
        p = [self.history[i + MOVES] for i in idx if i + MOVES < len(self.history)]
        if not p:
            return self.win[random.choice(["R", "P", "S"])]
        # Play the move that beats the opponent's most frequent follow-up.
        c = collections.Counter(p)
        return self.win[c.most_common(1)[0][0]]

# rpscontest convention: this whole source is executed once per round in a
# persistent namespace; `input` holds the opponent's previous move ("" on
# the first round) and `output` must be set to this bot's move.
if input == "":
    m = MarkovChain()
else:
    m.learn(input)
output = m.decide()
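
For a quick offline smoke test without rpsrunner.py, the sketch below replays the contest protocol as described above, under the assumption that the bot source is exec'd once per round in a persistent globals dict, with `input` set to the opponent's previous move ("" on the first round) and `output` read back as the bot's reply. The `play_match` helper, the uniformly random opponent, and the "supermarkov.py" filename are hypothetical stand-ins, not part of the contest tooling.

import random

def play_match(bot_source, rounds=1000):
    # Persistent globals shared by every round, mimicking the contest runner.
    env = {}
    beats = {"R": "S", "P": "R", "S": "P"}  # move -> the move it defeats
    score = 0
    opp_last = ""  # opponent's previous move; "" before the first round
    for _ in range(rounds):
        env["input"] = opp_last
        exec(bot_source, env)       # run the bot once for this round
        my_move = env["output"]
        opp_move = random.choice("RPS")
        if beats[my_move] == opp_move:
            score += 1
        elif beats[opp_move] == my_move:
            score -= 1
        opp_last = opp_move
    return score  # positive means the bot beat the random opponent overall

if __name__ == "__main__":
    # "supermarkov.py" is a placeholder filename for the source listed above.
    with open("supermarkov.py") as f:
        print(play_match(f.read()))

Against a purely random opponent the expected score is near zero; the predictor only pays off against opponents whose move sequences have exploitable structure.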