Implement estimation of agent LQRE in pygambit.
tturocy committed Apr 16, 2024
1 parent 777523b commit f24845c
Showing 11 changed files with 468 additions and 61 deletions.
7 changes: 5 additions & 2 deletions doc/pygambit.api.rst
@@ -296,6 +296,9 @@ Computation of quantal response equilibria
 .. autosummary::
    :toctree: api/

-   fit_empirical
-   fit_fixedpoint
+   fit_strategy_empirical
+   fit_strategy_fixedpoint
    LogitQREMixedStrategyFitResult
+
+   fit_behavior_fixedpoint
+   LogitQREMixedBehaviorFitResult
4 changes: 2 additions & 2 deletions doc/pygambit.user.rst
@@ -704,11 +704,11 @@ analysed in [McKPal95]_ using QRE.
    )
    data = g.mixed_strategy_profile([[128*0.527, 128*(1-0.527)], [128*0.366, 128*(1-0.366)]])

-Estimation of QRE is done using :py:func:`.fit_fixedpoint`.
+Estimation of QRE in the strategic form is done using :py:func:`.fit_strategy_fixedpoint`.

 .. ipython:: python

-   fit = gbt.qre.fit_fixedpoint(data)
+   fit = gbt.qre.fit_strategy_fixedpoint(data)

 The returned :py:class:`.LogitQREMixedStrategyFitResult` object contains the results of the
 estimation.
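A usage note on the renamed estimator: continuing from the `data` profile in the hunk above, the returned fit object can be inspected directly. This is a minimal sketch only; the `lam`, `log_like`, and `profile` attribute names on `LogitQREMixedStrategyFitResult` are assumptions inferred from the accessors this commit adds to the QRE profile classes, not shown in this diff.

    fit = gbt.qre.fit_strategy_fixedpoint(data)
    print(fit.lam)       # estimated precision parameter (assumed attribute name)
    print(fit.log_like)  # log-likelihood of the data at the estimate (assumed)
    print(fit.profile)   # the fitted mixed strategy profile (assumed)
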
19 changes: 14 additions & 5 deletions src/games/behavmixed.cc
@@ -136,12 +136,21 @@ template <class T>
 MixedBehaviorProfile<T> &
 MixedBehaviorProfile<T>::operator=(const MixedBehaviorProfile<T> &p_profile)
 {
-  if (this != &p_profile && m_support == p_profile.m_support) {
-    InvalidateCache();
-    m_probs = p_profile.m_probs;
-    m_support = p_profile.m_support;
-    m_gameversion = p_profile.m_gameversion;
+  if (this == &p_profile) {
+    return *this;
+  }
+  if (m_support != p_profile.m_support) {
+    throw MismatchException();
   }
+  InvalidateCache();
+  m_probs = p_profile.m_probs;
+  m_gameversion = p_profile.m_gameversion;
+  map_realizProbs = p_profile.map_realizProbs;
+  map_beliefs = p_profile.map_beliefs;
+  map_nodeValues = p_profile.map_nodeValues;
+  map_infosetValues = p_profile.map_infosetValues;
+  map_actionValues = p_profile.map_actionValues;
+  map_regret = p_profile.map_regret;
   return *this;
 }

32 changes: 24 additions & 8 deletions src/pygambit/gambit.pxd
@@ -455,13 +455,17 @@ cdef extern from "solvers/gnm/gnm.h":
         int p_localNewtonInterval, int p_localNewtonMaxits
     ) except +RuntimeError

-cdef extern from "solvers/logit/nfglogit.h":
-    c_List[c_MixedStrategyProfileDouble] LogitStrategySolve(c_Game,
-                                                            double,
-                                                            double,
-                                                            double) except +RuntimeError
-
 cdef extern from "solvers/logit/efglogit.h":
+    cdef cppclass c_LogitQREMixedBehaviorProfile "LogitQREMixedBehaviorProfile":
+        c_LogitQREMixedBehaviorProfile(c_Game) except +
+        c_LogitQREMixedBehaviorProfile(c_LogitQREMixedBehaviorProfile) except +
+        c_Game GetGame() except +
+        c_MixedBehaviorProfileDouble GetProfile()  # except + doesn't compile
+        double GetLambda() except +
+        double GetLogLike() except +
+        int BehaviorProfileLength() except +
+        double getitem "operator[]"(int) except +IndexError
+
     c_List[c_MixedBehaviorProfileDouble] LogitBehaviorSolve(c_Game,
                                                             double,
                                                             double,
@@ -484,11 +488,23 @@
                                          c_MixedStrategyProfileDouble,
                                          double, double, double) except +RuntimeError

+    c_List[c_MixedStrategyProfileDouble] LogitStrategySolve(c_Game,
+                                                            double,
+                                                            double,
+                                                            double) except +RuntimeError
+
+
 cdef extern from "nash.h":
-    shared_ptr[c_LogitQREMixedStrategyProfile] _logit_estimate "logit_estimate"(
+    shared_ptr[c_LogitQREMixedBehaviorProfile] LogitBehaviorEstimateHelper(
+        shared_ptr[c_MixedBehaviorProfileDouble], double, double
+    ) except +
+    shared_ptr[c_LogitQREMixedBehaviorProfile] LogitBehaviorAtLambdaHelper(
+        c_Game, double, double, double
+    ) except +
+    shared_ptr[c_LogitQREMixedStrategyProfile] LogitStrategyEstimateHelper(
         shared_ptr[c_MixedStrategyProfileDouble], double, double
     ) except +
-    shared_ptr[c_LogitQREMixedStrategyProfile] _logit_atlambda "logit_atlambda"(
+    shared_ptr[c_LogitQREMixedStrategyProfile] LogitStrategyAtLambdaHelper(
         c_Game, double, double, double
     ) except +
     c_List[c_LogitQREMixedStrategyProfile] _logit_principal_branch "logit_principal_branch"(
39 changes: 29 additions & 10 deletions src/pygambit/nash.h
@@ -21,6 +21,7 @@
 //

 #include "gambit.h"
+#include "solvers/logit/efglogit.h"
 #include "solvers/logit/nfglogit.h"

 using namespace std;
@@ -31,23 +32,41 @@ class NullBuffer : public std::streambuf {
   int overflow(int c) { return c; }
 };

-std::shared_ptr<LogitQREMixedStrategyProfile>
-logit_estimate(std::shared_ptr<MixedStrategyProfile<double>> p_frequencies, double p_firstStep,
-               double p_maxAccel)
+std::shared_ptr<LogitQREMixedBehaviorProfile>
+LogitBehaviorEstimateHelper(std::shared_ptr<MixedBehaviorProfile<double>> p_frequencies,
+                            double p_firstStep, double p_maxAccel)
 {
-  LogitQREMixedStrategyProfile start(p_frequencies->GetGame());
-  StrategicQREEstimator alg;
+  return make_shared<LogitQREMixedBehaviorProfile>(
+      LogitBehaviorEstimate(*p_frequencies, p_firstStep, p_maxAccel));
+}
+
+std::shared_ptr<LogitQREMixedBehaviorProfile> LogitBehaviorAtLambdaHelper(const Game &p_game,
+                                                                          double p_lambda,
+                                                                          double p_firstStep,
+                                                                          double p_maxAccel)
+{
+  LogitQREMixedBehaviorProfile start(p_game);
+  AgentQREPathTracer alg;
   alg.SetMaxDecel(p_maxAccel);
   alg.SetStepsize(p_firstStep);
   NullBuffer null_buffer;
   std::ostream null_stream(&null_buffer);
-  LogitQREMixedStrategyProfile result =
-      alg.Estimate(start, *p_frequencies, null_stream, 1000000.0, 1.0);
-  return make_shared<LogitQREMixedStrategyProfile>(result);
+  return make_shared<LogitQREMixedBehaviorProfile>(
+      alg.SolveAtLambda(start, null_stream, p_lambda, 1.0));
 }

+std::shared_ptr<LogitQREMixedStrategyProfile>
+LogitStrategyEstimateHelper(std::shared_ptr<MixedStrategyProfile<double>> p_frequencies,
+                            double p_firstStep, double p_maxAccel)
+{
+  return make_shared<LogitQREMixedStrategyProfile>(
+      LogitStrategyEstimate(*p_frequencies, p_firstStep, p_maxAccel));
+}
+
-std::shared_ptr<LogitQREMixedStrategyProfile> logit_atlambda(const Game &p_game, double p_lambda,
-                                                             double p_firstStep, double p_maxAccel)
+std::shared_ptr<LogitQREMixedStrategyProfile> LogitStrategyAtLambdaHelper(const Game &p_game,
+                                                                          double p_lambda,
+                                                                          double p_firstStep,
+                                                                          double p_maxAccel)
 {
   LogitQREMixedStrategyProfile start(p_game);
   StrategicQREPathTracer alg;
93 changes: 81 additions & 12 deletions src/pygambit/nash.pxi
@@ -240,26 +240,26 @@ class LogitQREMixedStrategyProfile:
         return profile


-def logit_estimate(profile: MixedStrategyProfileDouble,
-                   first_step: float = .03,
-                   max_accel: float = 1.1) -> LogitQREMixedStrategyProfile:
+def _logit_strategy_estimate(profile: MixedStrategyProfileDouble,
+                             first_step: float = .03,
+                             max_accel: float = 1.1) -> LogitQREMixedStrategyProfile:
     """Estimate QRE corresponding to mixed strategy profile using
     maximum likelihood along the principal branch.
     """
-    ret = LogitQREMixedStrategyProfile()
-    ret.thisptr = _logit_estimate(profile.profile, first_step, max_accel)
+    ret = LogitQREMixedStrategyProfile(profile.game)
+    ret.thisptr = LogitStrategyEstimateHelper(profile.profile, first_step, max_accel)
     return ret


-def logit_atlambda(game: Game,
-                   lam: float,
-                   first_step: float = .03,
-                   max_accel: float = 1.1) -> LogitQREMixedStrategyProfile:
-    """Compute the first QRE along the principal branch with the given
-    lambda parameter.
+def logit_strategy_atlambda(game: Game,
+                            lam: float,
+                            first_step: float = .03,
+                            max_accel: float = 1.1) -> LogitQREMixedStrategyProfile:
+    """Compute the first QRE encountered along the principal branch of the strategic
+    game corresponding to lambda value `lam`.
     """
     ret = LogitQREMixedStrategyProfile()
-    ret.thisptr = _logit_atlambda(game.game, lam, first_step, max_accel)
+    ret.thisptr = LogitStrategyAtLambdaHelper(game.game, lam, first_step, max_accel)
     return ret


@@ -271,3 +271,72 @@ def logit_principal_branch(game: Game, first_step: float = .03, max_accel: float
         p.thisptr = copyitem_list_qrem(solns, i+1)
         ret.append(p)
     return ret
+
+
+@cython.cclass
+class LogitQREMixedBehaviorProfile:
+    thisptr = cython.declare(shared_ptr[c_LogitQREMixedBehaviorProfile])
+
+    def __init__(self, game=None):
+        if game is not None:
+            self.thisptr = make_shared[c_LogitQREMixedBehaviorProfile](
+                cython.cast(Game, game).game
+            )
+
+    def __repr__(self):
+        return f"LogitQREMixedBehaviorProfile(lam={self.lam},profile={self.profile})"
+
+    def __len__(self):
+        return deref(self.thisptr).BehaviorProfileLength()
+
+    def __getitem__(self, int i):
+        return deref(self.thisptr).getitem(i+1)
+
+    @property
+    def game(self) -> Game:
+        """The game on which this mixed behavior profile is defined."""
+        g = Game()
+        g.game = deref(self.thisptr).GetGame()
+        return g
+
+    @property
+    def lam(self) -> double:
+        """The value of the precision parameter."""
+        return deref(self.thisptr).GetLambda()
+
+    @property
+    def log_like(self) -> double:
+        """The log-likelihood of the data."""
+        return deref(self.thisptr).GetLogLike()
+
+    @property
+    def profile(self) -> MixedBehaviorProfileDouble:
+        """The mixed behavior profile."""
+        profile = MixedBehaviorProfileDouble()
+        profile.profile = (
+            make_shared[c_MixedBehaviorProfileDouble](deref(self.thisptr).GetProfile())
+        )
+        return profile
+
+
+def _logit_behavior_estimate(profile: MixedBehaviorProfileDouble,
+                             first_step: float = .03,
+                             max_accel: float = 1.1) -> LogitQREMixedBehaviorProfile:
+    """Estimate QRE corresponding to mixed behavior profile using
+    maximum likelihood along the principal branch.
+    """
+    ret = LogitQREMixedBehaviorProfile(profile.game)
+    ret.thisptr = LogitBehaviorEstimateHelper(profile.profile, first_step, max_accel)
+    return ret
+
+
+def logit_behavior_atlambda(game: Game,
+                            lam: float,
+                            first_step: float = .03,
+                            max_accel: float = 1.1) -> LogitQREMixedBehaviorProfile:
+    """Compute the first QRE encountered along the principal branch of the extensive
+    game corresponding to lambda value `lam`.
+    """
+    ret = LogitQREMixedBehaviorProfile()
+    ret.thisptr = LogitBehaviorAtLambdaHelper(game.game, lam, first_step, max_accel)
+    return ret
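
To make the new agent-QRE surface concrete, here is a hedged sketch of how the functions above might be exercised once compiled into pygambit. The game file name is hypothetical, and calling `logit_behavior_atlambda` through `pygambit.nash` is an assumption based on the aliases wired up in `nash.py` below.

    import pygambit as gbt

    g = gbt.Game.read_game("example.efg")  # hypothetical extensive-form game file
    qre = gbt.nash.logit_behavior_atlambda(g, 1.5)  # first principal-branch QRE at lambda = 1.5
    qre.lam       # precision parameter, via GetLambda()
    qre.log_like  # log-likelihood of the data, via GetLogLike()
    qre.profile   # the underlying MixedBehaviorProfileDouble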
3 changes: 2 additions & 1 deletion src/pygambit/nash.py
@@ -587,5 +587,6 @@ def logit_solve(
     )


-logit_atlambda = libgbt.logit_atlambda
+logit_behavior_atlambda = libgbt.logit_behavior_atlambda
+logit_strategy_atlambda = libgbt.logit_strategy_atlambda
 logit_principal_branch = libgbt.logit_principal_branch
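
Callers of the old single entry point now choose a representation explicitly. A sketch of the rename's effect, with `g` a Game as in the sketch above; that these aliases are importable from `pygambit.nash` is an assumption based on the hunk just shown.

    from pygambit.nash import logit_behavior_atlambda, logit_strategy_atlambda

    # formerly: logit_atlambda(g, 2.0)
    qre_s = logit_strategy_atlambda(g, 2.0)  # QRE of the strategic (normal) form
    qre_b = logit_behavior_atlambda(g, 2.0)  # agent QRE of the extensive form (new)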
