Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions changes/176.bugfix.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Fix PlackettLuce rating adjustment for ties.
2 changes: 1 addition & 1 deletion openskill/models/weng_lin/plackett_luce.py
Original file line number Diff line number Diff line change
Expand Up @@ -848,7 +848,7 @@ def _compute(
delta += (
i_mu_over_ce_over_sum_q * (1 - i_mu_over_ce_over_sum_q) / a[q]
)
if team_q.rank == team_i.rank:
if q == i:
omega += (1 - i_mu_over_ce_over_sum_q) / a[q]
else:
omega -= i_mu_over_ce_over_sum_q / a[q]
Expand Down
18 changes: 9 additions & 9 deletions tests/models/data/plackettluce.json
Original file line number Diff line number Diff line change
Expand Up @@ -86,21 +86,21 @@
],
"team_2": [
{
"mu": 23.083700519022745,
"mu": 20.599656070925114,
"sigma": 8.222674397835641
},
{
"mu": 21.717476072569045,
"mu": 20.47545384852023,
"sigma": 8.277466291367071
}
],
"team_3": [
{
"mu": 21.717476072569045,
"mu": 20.47545384852023,
"sigma": 8.277466291367071
},
{
"mu": 23.083700519022745,
"mu": 20.599656070925114,
"sigma": 8.222674397835641
}
],
Expand Down Expand Up @@ -160,7 +160,7 @@
"ties": {
"team_1": [
{
"mu": 23.241111866333558,
"mu": 21.63766806988004,
"sigma": 8.310709773172306
}
],
Expand All @@ -176,15 +176,15 @@
],
"team_3": [
{
"mu": 21.479966996534408,
"mu": 19.87652320008089,
"sigma": 8.237522411103104
},
{
"mu": 21.479966996534408,
"mu": 19.87652320008089,
"sigma": 8.237522411103104
},
{
"mu": 21.479966996534408,
"mu": 19.87652320008089,
"sigma": 8.237522411103104
}
]
Expand Down Expand Up @@ -261,4 +261,4 @@
}
]
}
}
}
58 changes: 45 additions & 13 deletions tests/models/weng_lin/test_common.py
Original file line number Diff line number Diff line change
Expand Up @@ -205,20 +205,52 @@ def test_ladder_pairs():

@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("tie_score", [-1, 0, 0.1, 10, 13.4])
def test_ties(model, tie_score):
@pytest.mark.parametrize("num_teams", [2, 5, 10])
@pytest.mark.parametrize("team_size", [1, 2, 5, 10])
@pytest.mark.parametrize("tie_type", ["score", "rank"])
def test_ties(model, tie_score, num_teams, team_size, tie_type) -> None:
model_instance = model()
teams = [
[model_instance.rating() for _ in range(team_size)] for _ in range(num_teams)
]
player_mu_before = [player.mu for team in teams for player in team]
assert all(
mu == player_mu_before[0] for mu in player_mu_before
), f"Model {model.__name__} with score {tie_score}: All players should start with equal mu"

player_sigma_before = [player.sigma for team in teams for player in team]
assert all(
sigma == player_sigma_before[0] for sigma in player_sigma_before
), f"Model {model.__name__} with score {tie_score}: All players should start with equal sigma"

if tie_type == "score":
scores = [tie_score for _ in range(num_teams)]
new_teams = model_instance.rate(teams, scores=scores)
else: # rank
ranks = [tie_score for _ in range(num_teams)]
new_teams = model_instance.rate(teams, ranks=ranks)

player_mu_after = [player.mu for team in new_teams for player in team]
assert all(
mu_after == mu_before
for mu_after, mu_before in zip(player_mu_after, player_mu_before)
), f"Model {model.__name__} with score {tie_score}: All players should end with equal mu"
player_sigma_after = [player.sigma for team in new_teams for player in team]
assert all(
sigma_after <= sigma_before
for sigma_after, sigma_before in zip(player_sigma_after, player_sigma_before)
), f"Model {model.__name__} with score {tie_score}: All players should end with lower or equal sigma"

player_1 = model_instance.rating()
player_2 = model_instance.rating()

result = model_instance.rate(
[[player_1], [player_2]], scores=[tie_score, tie_score]
)
@pytest.mark.parametrize("model", MODELS)
def test_ties_with_close_ratings(model) -> None:
model_instance = model()

player_1 = model_instance.rating(mu=30)
player_2 = model_instance.rating(mu=20)

new_teams = model_instance.rate([[player_1], [player_2]], ranks=[0, 0])

# Both players should have the same rating change
assert (
result[0][0].mu == result[1][0].mu
), f"Model {model.__name__} with score {tie_score}: Players should have equal mu after tie"
assert (
result[0][0].sigma == result[1][0].sigma
), f"Model {model.__name__} with score {tie_score}: Players should have equal sigma after tie"
# Ratings should converge toward each other on a tie: the stronger player's mu drops, the weaker player's mu rises.
assert new_teams[0][0].mu < 30
assert new_teams[1][0].mu > 20