prologin2014_ranking

Scripts to compute the final ranking of Prologin 2014
git clone https://esimon.eu/repos/prologin2014_ranking

commit 10c4d0f280921a79b3c89e44b41bd1eb39d49ede
Author: Étienne Simon <esimon@esimon.eu>
Date:   Mon, 19 May 2014 16:47:42 +0200

Initial commit

Diffstat:
A init.py |  20 ++++++++++++++++++++
A rank.py | 169 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A top.py  |  27 +++++++++++++++++++++++++++
3 files changed, 216 insertions(+), 0 deletions(-)
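
The three scripts form a pipeline: init.py seeds a pickled dict mapping every non-staff user owning a champion to a default trueskill Rating, rank.py schedules matches and refines those ratings, and top.py writes the best-rated players to a new file so that rank.py can be run again on the head of the leaderboard. A hypothetical invocation (the file names are illustrative, not taken from the repository):

    ./init.py ratings.pickle
    ./rank.py ratings.pickle               # about n*log2(n) epochs by default
    ./top.py ratings.pickle top.pickle 20
    ./rank.py top.pickle 500               # refine the top 20 over 500 epochs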

diff --git a/init.py b/init.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python3
+
+import os
+import sys
+import pickle
+from trueskill import Rating
+
+os.environ['DJANGO_SETTINGS_MODULE'] = 'prologin.concours.settings'
+sys.path.insert(0, '/root/sadm/python-lib')
+from django.contrib.auth.models import User
+from prologin.concours.stechec.models import *
+
+def has_bot(user):
+    return len(Champion.objects.filter(author=user)) > 0
+
+filepath = sys.argv[1]
+with open(filepath, 'wb') as f:
+    # Seed every non-staff user owning at least one champion with a default rating
+    users = User.objects.filter(is_staff=False)
+    ratings = dict([(user.username, Rating()) for user in users if has_bot(user)])
+    pickle.dump(ratings, f)
diff --git a/rank.py b/rank.py
@@ -0,0 +1,169 @@
+#!/usr/bin/env python3
+
+import os
+import sys
+import time
+from math import log, erf, sqrt
+from random import shuffle
+import pickle
+from trueskill import Rating, quality, rate
+
+os.environ['DJANGO_SETTINGS_MODULE'] = 'prologin.concours.settings'
+sys.path.insert(0, '/root/sadm/python-lib')
+from django.contrib.auth.models import User
+from prologin.concours.stechec.models import *
+
+sample_size = 1
+party_size = 4
+conservativeness = 3
+max_concurrent_matches = 250
+concurrent_match_per_player = 50
+min_ppp_margin = 2
+update_score_frequency = 100
+tournament_name = 'Tournoi final'
+
+filepath = sys.argv[1]
+with open(filepath, 'rb') as f:
+    ratings = pickle.load(f)
+
+current_matches = []
+nb_match_per_player = {}
+champions = {}
+users = {}
+for user in ratings.keys():
+    # Use each player's latest non-deleted champion
+    champion = Champion.objects.filter(author__username=user).order_by('-ts').exclude(deleted=True)[0].pk
+    nb_match_per_player[user] = 0
+    champions[user] = champion
+    users[champion] = user
+
+def launch_match(cs):
+    match = Match()
+    match.author = User.objects.get(username='epsilon012')
+    match.tournament = Tournament.objects.get(name=tournament_name)
+    match.save()
+
+    for c in cs:
+        mc = MatchPlayer()
+        mc.champion = Champion.objects.get(pk=c)
+        mc.match = match
+        mc.save()
+
+    match.status = 'new'
+    match.save()
+
+    return match.pk
+
+def is_match_complete(match):
+    m = Match.objects.get(pk=match)
+    return m.status == 'done'
+
+def get_match_results(match):
+    players = MatchPlayer.objects.filter(match__pk=match)
+    scores = dict((p.champion.pk, p.score) for p in players)
+    return scores
+
+def do_match(party):
+    for member in party:
+        nb_match_per_player[member] += 1
+    party_champ = [champions[member] for member in party]
+    pk = launch_match(party_champ)
+    current_matches.append(pk)
+    return pk
+
+def conservative_estimate(key):
+    return ratings[key].mu - conservativeness * ratings[key].sigma
+
+def can_enter_match(user):
+    return nb_match_per_player[user] < concurrent_match_per_player
+
+def match_making():
+    # Seed the party with the most uncertain rating among the least-played
+    # users, then pick the opponents maximising trueskill's match quality.
+    min_ppp = min(nb_match_per_player.values())
+    pick_from_me = [user for user in ratings.keys() if nb_match_per_player[user] <= min_ppp + min_ppp_margin]
+
+    ready = list(filter(can_enter_match, ratings.keys()))
+    fuzziest = max(pick_from_me, key=lambda x: ratings[x].sigma)
+    ready.remove(fuzziest)
+    sample_me = []
+
+    for _ in range(sample_size):
+        shuffle(ready)
+        sample_me += ready[:len(ready)//(party_size-1)*(party_size-1)]
+
+    max_qual = -1
+    max_party = None
+
+    for i in range(len(sample_me)//(party_size-1)):
+        party_minus_1 = sample_me[(party_size-1)*i:(party_size-1)*(i+1)]
+        party = party_minus_1 + [fuzziest]
+        gr = [[ratings[member]] for member in party]
+        qual = quality(gr)
+
+        if qual > max_qual:
+            max_qual = qual
+            max_party = party
+
+    return max_party
+
+def update(scores):
+    for key in scores:
+        nb_match_per_player[key] -= 1
+        scores[key] = -scores[key]  # trueskill expects ranks: lower is better
+
+    if not any(scores.values()):
+        return  # every score is zero, the match produced no result
+
+    new_rating = rate([(ratings[key],) for key in scores], scores.values())
+    for key, value in zip(scores.keys(), new_rating):
+        ratings[key] = value[0]
+
+    with open(filepath, 'wb') as f:
+        pickle.dump(ratings, f)
+
+def update_scores():
+    global current_matches
+    remove_me = []
+    for match_pk in current_matches:
+        match = Match.objects.get(pk=match_pk)
+        if match.status == 'done':
+            print("Match {0:<5} done".format(match_pk))
+            remove_me.append(match_pk)
+            scores_champ = get_match_results(match_pk)
+            update(dict((users[key], scores_champ[key]) for key in scores_champ))
+    current_matches = [item for item in current_matches if item not in remove_me]
+
+if len(sys.argv) > 2:
+    N = int(sys.argv[2])
+else:
+    # Default to n*log2(n) epochs
+    n = len(ratings)
+    N = int(round(n*log(n, 2)))
+
+for epoch in range(N):
+    party = match_making()
+    if party is None:
+        print("Epoch {0:<5} No match".format(epoch))
+        time.sleep(3)
+    else:
+        id_match = do_match(party)
+        print("Epoch {0:<5} id {1:<5} party {2}".format(epoch, id_match, party))
+
+    while len(current_matches) >= max_concurrent_matches:
+        update_scores()
+        time.sleep(3)
+
+    if (epoch+1) % update_score_frequency == 0:
+        update_scores()
+
+while len(current_matches) > 0:
+    time.sleep(5)
+    update_scores()
+
+leaderboard = list(ratings.keys())
+#leaderboard.sort(key=conservative_estimate, reverse=True)
+leaderboard.sort(key=lambda x: ratings[x].mu, reverse=True)
+for (position, champion) in enumerate(leaderboard):
+    if position != len(leaderboard)-1:
+        # Normalized mu gap to the next player, mapped through the error function
+        std = (ratings[champion].mu - ratings[leaderboard[position+1]].mu) / ratings[champion].sigma
+        std = erf(std/sqrt(2))
+    else:
+        std = '-'
+    print("{0:<2} champion {1:<15} estimate {2:<20} mu {3:<20} sigma {4:<20} prob {5:<20}".format(position+1, champion, conservative_estimate(champion), ratings[champion].mu, ratings[champion].sigma, std))
diff --git a/top.py b/top.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python3
+
+import os
+import sys
+import pickle
+from trueskill import Rating
+
+os.environ['DJANGO_SETTINGS_MODULE'] = 'prologin.concours.settings'
+sys.path.insert(0, '/root/sadm/python-lib')
+from django.contrib.auth.models import User
+from prologin.concours.stechec.models import *
+
+filepath = sys.argv[1]
+with open(filepath, 'rb') as f:
+    ratings = pickle.load(f)
+filepath = sys.argv[2]
+
+limit = int(sys.argv[3])
+leaderboard = list(ratings.keys())
+#leaderboard.sort(key=conservative_estimate, reverse=True)
+leaderboard.sort(key=lambda x: ratings[x].mu, reverse=True)
+
+# Keep only the `limit` best-rated players
+selected = leaderboard[0:limit]
+new_ratings = dict((key, ratings[key]) for key in selected)
+
+with open(filepath, 'wb') as f:
+    pickle.dump(new_ratings, f)
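
For reference, a minimal standalone sketch of the trueskill calls rank.py is built on (no Django needed; the player names are made up):

    from trueskill import Rating, quality, rate

    ratings = {name: Rating() for name in ('alice', 'bob', 'carol', 'dave')}
    party = list(ratings)

    # quality() estimates how balanced a free-for-all would be;
    # match_making() maximises it over randomly sampled 4-player parties.
    print(quality([(ratings[p],) for p in party]))

    # rate() expects ranks where lower is better, hence the score
    # negation in update(): a raw score of 42 becomes rank -42.
    scores = {'alice': 42, 'bob': 17, 'carol': 30, 'dave': 5}
    new = rate([(ratings[p],) for p in party], [-scores[p] for p in party])
    for p, (r,) in zip(party, new):
        ratings[p] = r
        print(p, r.mu, r.sigma)

The "prob" column printed at the end of rank.py is erf(d/sqrt(2)), where d is the mu gap to the next player divided by the player's sigma: a rough confidence that two adjacent leaderboard positions are in the right order.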