Skip to content

Commit

Permalink
Expose first_step and max_accel parameters to numerical continuation for QRE tracing.
Browse files Browse the repository at this point in the history

This adds access to these parameters from pygambit.  In addition to customising the behaviour of
these functions, changing these parameters from their defaults can help prevent the path-following
from getting stuck by landing too close to a bifurcation point.
  • Loading branch information
tturocy committed Apr 3, 2024
1 parent 8b4fec6 commit c29d9a1
Show file tree
Hide file tree
Showing 7 changed files with 68 additions and 23 deletions.
2 changes: 2 additions & 0 deletions ChangeLog
Expand Up @@ -28,6 +28,8 @@
- Some caching added to payoff/strategy value calculations in `MixedStrategyProfile`
- `gambit-simpdiv` now supports expressing output as floating-point with a specified number of
digits (#296)
- Parameters `first_step` and `max_accel` added to `gambit_logit` for finer control of
numerical continuation process

### Changed
- Gambit now requires a compiler that supports C++17.
Expand Down
16 changes: 11 additions & 5 deletions src/pygambit/gambit.pxd
Expand Up @@ -456,10 +456,16 @@ cdef extern from "solvers/gnm/gnm.h":
) except +RuntimeError

cdef extern from "solvers/logit/nfglogit.h":
    # Trace the strategic-form logit QRE correspondence.
    # Arguments (in order): game, maxregret, first_step, max_accel.
    # (The stale pre-change two-argument declaration has been removed;
    # leaving both declarations in place is a conflict.)
    c_List[c_MixedStrategyProfileDouble] LogitStrategySolve(c_Game,
                                                            double,
                                                            double,
                                                            double) except +RuntimeError

cdef extern from "solvers/logit/efglogit.h":
    # Trace the agent (behavior-strategy) logit QRE correspondence.
    # Arguments (in order): game, maxregret, first_step, max_accel.
    # (The stale pre-change two-argument declaration has been removed;
    # leaving both declarations in place is a conflict.)
    c_List[c_MixedBehaviorProfileDouble] LogitBehaviorSolve(c_Game,
                                                            double,
                                                            double,
                                                            double) except +RuntimeError

cdef extern from "solvers/logit/nfglogit.h":
cdef cppclass c_LogitQREMixedStrategyProfile "LogitQREMixedStrategyProfile":
Expand All @@ -480,11 +486,11 @@ cdef extern from "solvers/logit/nfglogit.h":

cdef extern from "nash.h":
    # Maximum-likelihood QRE estimation along the principal branch.
    # Arguments after the frequencies profile: first_step, max_accel.
    shared_ptr[c_LogitQREMixedStrategyProfile] _logit_estimate "logit_estimate"(
        shared_ptr[c_MixedStrategyProfileDouble], double, double
    ) except +
    # First QRE on the principal branch at a given lambda.
    # Arguments: game, lambda, first_step, max_accel.
    shared_ptr[c_LogitQREMixedStrategyProfile] _logit_atlambda "logit_atlambda"(
        c_Game, double, double, double
    ) except +
    # Full principal branch of the correspondence.
    # Arguments: game, maxregret, first_step, max_accel.
    # (Stale pre-change declarations that were interleaved with the new
    # four-argument forms have been removed.)
    c_List[c_LogitQREMixedStrategyProfile] _logit_principal_branch "logit_principal_branch"(
        c_Game, double, double, double
    ) except +
15 changes: 12 additions & 3 deletions src/pygambit/nash.h
Expand Up @@ -32,31 +32,40 @@ class NullBuffer : public std::streambuf {
};

std::shared_ptr<LogitQREMixedStrategyProfile>
logit_estimate(std::shared_ptr<MixedStrategyProfile<double>> p_frequencies)
logit_estimate(std::shared_ptr<MixedStrategyProfile<double>> p_frequencies, double p_firstStep,
double p_maxAccel)
{
LogitQREMixedStrategyProfile start(p_frequencies->GetGame());
StrategicQREEstimator alg;
alg.SetMaxDecel(p_maxAccel);
alg.SetStepsize(p_firstStep);
NullBuffer null_buffer;
std::ostream null_stream(&null_buffer);
LogitQREMixedStrategyProfile result =
alg.Estimate(start, *p_frequencies, null_stream, 1000000.0, 1.0);
return make_shared<LogitQREMixedStrategyProfile>(result);
}

std::shared_ptr<LogitQREMixedStrategyProfile> logit_atlambda(const Game &p_game, double p_lambda)
std::shared_ptr<LogitQREMixedStrategyProfile> logit_atlambda(const Game &p_game, double p_lambda,
double p_firstStep, double p_maxAccel)
{
LogitQREMixedStrategyProfile start(p_game);
StrategicQREPathTracer alg;
alg.SetMaxDecel(p_maxAccel);
alg.SetStepsize(p_firstStep);
NullBuffer null_buffer;
std::ostream null_stream(&null_buffer);
return make_shared<LogitQREMixedStrategyProfile>(
alg.SolveAtLambda(start, null_stream, p_lambda, 1.0));
}

List<LogitQREMixedStrategyProfile> logit_principal_branch(const Game &p_game, double p_maxregret)
List<LogitQREMixedStrategyProfile> logit_principal_branch(const Game &p_game, double p_maxregret,
double p_firstStep, double p_maxAccel)
{
LogitQREMixedStrategyProfile start(p_game);
StrategicQREPathTracer alg;
alg.SetMaxDecel(p_maxAccel);
alg.SetStepsize(p_firstStep);
NullBuffer null_buffer;
std::ostream null_stream(&null_buffer);
return alg.TraceStrategicPath(start, null_stream, p_maxregret, 1.0);
Expand Down
29 changes: 19 additions & 10 deletions src/pygambit/nash.pxi
Expand Up @@ -182,12 +182,16 @@ def _gnm_strategy_solve(
raise


def _logit_strategy_solve(
        game: Game, maxregret: float, first_step: float, max_accel: float,
) -> typing.List[MixedStrategyProfileDouble]:
    """Low-level wrapper: trace the strategic-form logit QRE correspondence.

    Passes `maxregret`, `first_step`, and `max_accel` through to the C++
    `LogitStrategySolve` and converts the resulting profiles to Python
    objects.  (The stale pre-change two-argument definition, which
    conflicted with this one, has been removed.)
    """
    return _convert_mspd(LogitStrategySolve(game.game, maxregret, first_step, max_accel))


def _logit_behavior_solve(
        game: Game, maxregret: float, first_step: float, max_accel: float,
) -> typing.List[MixedBehaviorProfileDouble]:
    """Low-level wrapper: trace the agent logit QRE correspondence.

    Passes `maxregret`, `first_step`, and `max_accel` through to the C++
    `LogitBehaviorSolve` and converts the resulting profiles to Python
    objects.  (The stale pre-change two-argument definition, which
    conflicted with this one, has been removed.)
    """
    return _convert_mbpd(LogitBehaviorSolve(game.game, maxregret, first_step, max_accel))


@cython.cclass
Expand Down Expand Up @@ -236,26 +240,31 @@ class LogitQREMixedStrategyProfile:
return profile


def logit_estimate(profile: MixedStrategyProfileDouble,
                   first_step: float = .03,
                   max_accel: float = 1.1) -> LogitQREMixedStrategyProfile:
    """Estimate QRE corresponding to mixed strategy profile using
    maximum likelihood along the principal branch.

    Parameters
    ----------
    profile : MixedStrategyProfileDouble
        The observed strategy frequencies to fit.
    first_step : float, default .03
        The arclength of the initial step.
    max_accel : float, default 1.1
        The maximum rate at which to lengthen the arclength step size.

    Returns
    -------
    LogitQREMixedStrategyProfile
        The maximum-likelihood QRE found along the principal branch.
    """
    # (The stale pre-change one-argument def line has been removed.)
    ret = LogitQREMixedStrategyProfile()
    ret.thisptr = _logit_estimate(profile.profile, first_step, max_accel)
    return ret


def logit_atlambda(game: Game,
                   lam: float,
                   first_step: float = .03,
                   max_accel: float = 1.1) -> LogitQREMixedStrategyProfile:
    """Compute the first QRE along the principal branch with the given
    lambda parameter.

    Parameters
    ----------
    game : Game
        The game on which to trace the correspondence.
    lam : float
        The value of the lambda parameter at which to stop.
    first_step : float, default .03
        The arclength of the initial step.
    max_accel : float, default 1.1
        The maximum rate at which to lengthen the arclength step size.

    Returns
    -------
    LogitQREMixedStrategyProfile
        The QRE on the principal branch at `lam`.
    """
    # (The stale pre-change two-argument def line has been removed.)
    ret = LogitQREMixedStrategyProfile()
    ret.thisptr = _logit_atlambda(game.game, lam, first_step, max_accel)
    return ret


def logit_principal_branch(game: Game):
solns = _logit_principal_branch(game.game, 1.0e-8)
def logit_principal_branch(game: Game, first_step: float = .03, max_accel: float = 1.1):
solns = _logit_principal_branch(game.game, 1.0e-8, first_step, max_accel)
ret = []
for i in range(solns.Length()):
p = LogitQREMixedStrategyProfile()
Expand Down
19 changes: 16 additions & 3 deletions src/pygambit/nash.py
Expand Up @@ -531,6 +531,8 @@ def logit_solve(
game: libgbt.Game,
use_strategic: bool = False,
maxregret: float = 1.0e-8,
first_step: float = .03,
max_accel: float = 1.1,
) -> NashComputationResult:
"""Compute Nash equilibria of a game using :ref:`the logit quantal response
equilibrium correspondence <gambit-logit>`.
Expand All @@ -552,7 +554,17 @@ def logit_solve(
regret of any player must be no more than `maxregret` times the
difference of the maximum and minimum payoffs of the game
.. versionadded: 16.2.0
.. versionadded:: 16.2.0
first_step : float, default .03
The arclength of the initial step.
.. versionadded:: 16.2.0
max_accel : float, default 1.1
The maximum rate at which to lengthen the arclength step size.
.. versionadded:: 16.2.0
Returns
-------
Expand All @@ -562,15 +574,16 @@ def logit_solve(
if maxregret <= 0.0:
raise ValueError("logit_solve(): maxregret argument must be positive")
if not game.is_tree or use_strategic:
equilibria = libgbt._logit_strategy_solve(game, maxregret)
equilibria = libgbt._logit_strategy_solve(game, maxregret, first_step, max_accel)
else:
equilibria = libgbt._logit_behavior_solve(game, maxregret)
equilibria = libgbt._logit_behavior_solve(game, maxregret, first_step, max_accel)
return NashComputationResult(
game=game,
method="logit",
rational=False,
use_strategic=not game.is_tree or use_strategic,
equilibria=equilibria,
parameters={"first_step": first_step, "max_accel": max_accel},
)


Expand Down
5 changes: 4 additions & 1 deletion src/solvers/logit/efglogit.h
Expand Up @@ -74,9 +74,12 @@ class AgentQREPathTracer : public PathTracer {
class LambdaCriterion;
};

inline List<MixedBehaviorProfile<double>> LogitBehaviorSolve(const Game &p_game, double p_epsilon)
inline List<MixedBehaviorProfile<double>> LogitBehaviorSolve(const Game &p_game, double p_epsilon,
double p_firstStep, double p_maxAccel)
{
AgentQREPathTracer tracer;
tracer.SetMaxDecel(p_maxAccel);
tracer.SetStepsize(p_firstStep);
tracer.SetFullGraph(false);
std::ostringstream ostream;
auto result =
Expand Down
5 changes: 4 additions & 1 deletion src/solvers/logit/nfglogit.h
Expand Up @@ -103,9 +103,12 @@ class StrategicQREEstimator : public StrategicQREPathTracer {
class CallbackFunction;
};

inline List<MixedStrategyProfile<double>> LogitStrategySolve(const Game &p_game, double p_regret)
inline List<MixedStrategyProfile<double>> LogitStrategySolve(const Game &p_game, double p_regret,
double p_firstStep, double p_maxAccel)
{
StrategicQREPathTracer tracer;
tracer.SetMaxDecel(p_maxAccel);
tracer.SetStepsize(p_firstStep);
tracer.SetFullGraph(false);
std::ostringstream ostream;
auto result =
Expand Down

0 comments on commit c29d9a1

Please sign in to comment.