diff --git a/doc/pygambit.api.rst b/doc/pygambit.api.rst
index e1a8a1710..bc98fc63e 100644
--- a/doc/pygambit.api.rst
+++ b/doc/pygambit.api.rst
@@ -296,6 +296,9 @@ Computation of quantal response equilibria
 .. autosummary::
    :toctree: api/

-   fit_empirical
-   fit_fixedpoint
+   fit_strategy_empirical
+   fit_strategy_fixedpoint
    LogitQREMixedStrategyFitResult
+
+   fit_behavior_fixedpoint
+   LogitQREMixedBehaviorFitResult
diff --git a/doc/pygambit.user.rst b/doc/pygambit.user.rst
index 23574f6bc..7bf5be199 100644
--- a/doc/pygambit.user.rst
+++ b/doc/pygambit.user.rst
@@ -704,11 +704,11 @@ analysed in [McKPal95]_ using QRE.
     )
     data = g.mixed_strategy_profile([[128*0.527, 128*(1-0.527)], [128*0.366, 128*(1-0.366)]])

-Estimation of QRE is done using :py:func:`.fit_fixedpoint`.
+Estimation of QRE in the strategic form is done using :py:func:`.fit_strategy_fixedpoint`.

 .. ipython:: python

-    fit = gbt.qre.fit_fixedpoint(data)
+    fit = gbt.qre.fit_strategy_fixedpoint(data)

 The returned :py:class:`.LogitQREMixedStrategyFitResult` object
 contains the results of the estimation.
diff --git a/src/games/behavmixed.cc b/src/games/behavmixed.cc
index 640e679b0..cacb3b128 100644
--- a/src/games/behavmixed.cc
+++ b/src/games/behavmixed.cc
@@ -136,12 +136,21 @@ template <class T>
 MixedBehaviorProfile<T> &
 MixedBehaviorProfile<T>::operator=(const MixedBehaviorProfile<T> &p_profile)
 {
-  if (this != &p_profile && m_support == p_profile.m_support) {
-    InvalidateCache();
-    m_probs = p_profile.m_probs;
-    m_support = p_profile.m_support;
-    m_gameversion = p_profile.m_gameversion;
+  if (this == &p_profile) {
+    return *this;
+  }
+  if (m_support != p_profile.m_support) {
+    throw MismatchException();
   }
+  InvalidateCache();
+  m_probs = p_profile.m_probs;
+  m_gameversion = p_profile.m_gameversion;
+  map_realizProbs = p_profile.map_realizProbs;
+  map_beliefs = p_profile.map_beliefs;
+  map_nodeValues = p_profile.map_nodeValues;
+  map_infosetValues = p_profile.map_infosetValues;
+  map_actionValues = p_profile.map_actionValues;
+  map_regret = p_profile.map_regret;
   return *this;
 }
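The renamed strategic-form estimator documented above can be exercised end to end with a small game. A minimal sketch, assuming pygambit built from this branch; the payoff matrices are illustrative stand-ins, not the [McKPal95]_ game, while the counts follow the manual's example:

    import numpy as np
    import pygambit as gbt

    # Illustrative 2x2 game; any strategic game works here.
    g = gbt.Game.from_arrays(np.array([[9, 0], [0, 1]]),
                             np.array([[0, 1], [1, 0]]))
    # Observation counts (totals, not probabilities), as the docstring requires.
    data = g.mixed_strategy_profile([[128 * 0.527, 128 * (1 - 0.527)],
                                     [128 * 0.366, 128 * (1 - 0.366)]])
    fit = gbt.qre.fit_strategy_fixedpoint(data)
    print(fit.lam, fit.log_like)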
diff --git a/src/pygambit/gambit.pxd b/src/pygambit/gambit.pxd
index 86ede77cc..76321976d 100644
--- a/src/pygambit/gambit.pxd
+++ b/src/pygambit/gambit.pxd
@@ -455,13 +455,17 @@ cdef extern from "solvers/gnm/gnm.h":
                                                      int p_localNewtonInterval,
                                                      int p_localNewtonMaxits
     ) except +RuntimeError

-cdef extern from "solvers/logit/nfglogit.h":
-    c_List[c_MixedStrategyProfileDouble] LogitStrategySolve(c_Game,
-                                                            double,
-                                                            double,
-                                                            double) except +RuntimeError
-
 cdef extern from "solvers/logit/efglogit.h":
+    cdef cppclass c_LogitQREMixedBehaviorProfile "LogitQREMixedBehaviorProfile":
+        c_LogitQREMixedBehaviorProfile(c_Game) except +
+        c_LogitQREMixedBehaviorProfile(c_LogitQREMixedBehaviorProfile) except +
+        c_Game GetGame() except +
+        c_MixedBehaviorProfileDouble GetProfile()  # except + doesn't compile
+        double GetLambda() except +
+        double GetLogLike() except +
+        int BehaviorProfileLength() except +
+        double getitem "operator[]"(int) except +IndexError
     c_List[c_MixedBehaviorProfileDouble] LogitBehaviorSolve(c_Game,
                                                             double,
                                                             double,
@@ -484,11 +488,23 @@ cdef extern from "solvers/logit/nfglogit.h":
                                             c_MixedStrategyProfileDouble,
                                             double, double, double) except +RuntimeError
+    c_List[c_MixedStrategyProfileDouble] LogitStrategySolve(c_Game,
+                                                            double,
+                                                            double,
+                                                            double) except +RuntimeError
+

 cdef extern from "nash.h":
-    shared_ptr[c_LogitQREMixedStrategyProfile] _logit_estimate "logit_estimate"(
+    shared_ptr[c_LogitQREMixedBehaviorProfile] LogitBehaviorEstimateHelper(
+        shared_ptr[c_MixedBehaviorProfileDouble], double, double
+    ) except +
+    shared_ptr[c_LogitQREMixedBehaviorProfile] LogitBehaviorAtLambdaHelper(
+        c_Game, double, double, double
+    ) except +
+    shared_ptr[c_LogitQREMixedStrategyProfile] LogitStrategyEstimateHelper(
         shared_ptr[c_MixedStrategyProfileDouble], double, double
     ) except +
-    shared_ptr[c_LogitQREMixedStrategyProfile] _logit_atlambda "logit_atlambda"(
+    shared_ptr[c_LogitQREMixedStrategyProfile] LogitStrategyAtLambdaHelper(
         c_Game, double, double, double
     ) except +
     c_List[c_LogitQREMixedStrategyProfile] _logit_principal_branch "logit_principal_branch"(
diff --git a/src/pygambit/nash.h b/src/pygambit/nash.h
index f4d61949f..0782c9daa 100644
--- a/src/pygambit/nash.h
+++ b/src/pygambit/nash.h
@@ -21,6 +21,7 @@
 //

 #include "gambit.h"
+#include "solvers/logit/efglogit.h"
 #include "solvers/logit/nfglogit.h"

 using namespace std;
@@ -31,23 +32,41 @@ class NullBuffer : public std::streambuf {
   int overflow(int c) { return c; }
 };

-std::shared_ptr<LogitQREMixedStrategyProfile>
-logit_estimate(std::shared_ptr<MixedStrategyProfile<double>> p_frequencies, double p_firstStep,
-               double p_maxAccel)
+std::shared_ptr<LogitQREMixedBehaviorProfile>
+LogitBehaviorEstimateHelper(std::shared_ptr<MixedBehaviorProfile<double>> p_frequencies,
+                            double p_firstStep, double p_maxAccel)
 {
-  LogitQREMixedStrategyProfile start(p_frequencies->GetGame());
-  StrategicQREEstimator alg;
+  return make_shared<LogitQREMixedBehaviorProfile>(
+      LogitBehaviorEstimate(*p_frequencies, p_firstStep, p_maxAccel));
+}
+
+std::shared_ptr<LogitQREMixedBehaviorProfile> LogitBehaviorAtLambdaHelper(const Game &p_game,
+                                                                          double p_lambda,
+                                                                          double p_firstStep,
+                                                                          double p_maxAccel)
+{
+  LogitQREMixedBehaviorProfile start(p_game);
+  AgentQREPathTracer alg;
   alg.SetMaxDecel(p_maxAccel);
   alg.SetStepsize(p_firstStep);
   NullBuffer null_buffer;
   std::ostream null_stream(&null_buffer);
-  LogitQREMixedStrategyProfile result =
-      alg.Estimate(start, *p_frequencies, null_stream, 1000000.0, 1.0);
-  return make_shared<LogitQREMixedStrategyProfile>(result);
+  return make_shared<LogitQREMixedBehaviorProfile>(
+      alg.SolveAtLambda(start, null_stream, p_lambda, 1.0));
+}
+
+std::shared_ptr<LogitQREMixedStrategyProfile>
+LogitStrategyEstimateHelper(std::shared_ptr<MixedStrategyProfile<double>> p_frequencies,
+                            double p_firstStep, double p_maxAccel)
+{
+  return make_shared<LogitQREMixedStrategyProfile>(
+      LogitStrategyEstimate(*p_frequencies, p_firstStep, p_maxAccel));
 }

-std::shared_ptr<LogitQREMixedStrategyProfile> logit_atlambda(const Game &p_game, double p_lambda,
-                                                             double p_firstStep, double p_maxAccel)
+std::shared_ptr<LogitQREMixedStrategyProfile> LogitStrategyAtLambdaHelper(const Game &p_game,
+                                                                          double p_lambda,
+                                                                          double p_firstStep,
+                                                                          double p_maxAccel)
 {
   LogitQREMixedStrategyProfile start(p_game);
   StrategicQREPathTracer alg;
""" - ret = LogitQREMixedStrategyProfile() - ret.thisptr = _logit_estimate(profile.profile, first_step, max_accel) + ret = LogitQREMixedStrategyProfile(profile.game) + ret.thisptr = LogitStrategyEstimateHelper(profile.profile, first_step, max_accel) return ret -def logit_atlambda(game: Game, - lam: float, - first_step: float = .03, - max_accel: float = 1.1) -> LogitQREMixedStrategyProfile: - """Compute the first QRE along the principal branch with the given - lambda parameter. +def logit_strategy_atlambda(game: Game, + lam: float, + first_step: float = .03, + max_accel: float = 1.1) -> LogitQREMixedStrategyProfile: + """Compute the first QRE encountered along the principal branch of the strategic + game corresponding to lambda value `lam`. """ ret = LogitQREMixedStrategyProfile() - ret.thisptr = _logit_atlambda(game.game, lam, first_step, max_accel) + ret.thisptr = LogitStrategyAtLambdaHelper(game.game, lam, first_step, max_accel) return ret @@ -271,3 +271,72 @@ def logit_principal_branch(game: Game, first_step: float = .03, max_accel: float p.thisptr = copyitem_list_qrem(solns, i+1) ret.append(p) return ret + + +@cython.cclass +class LogitQREMixedBehaviorProfile: + thisptr = cython.declare(shared_ptr[c_LogitQREMixedBehaviorProfile]) + + def __init__(self, game=None): + if game is not None: + self.thisptr = make_shared[c_LogitQREMixedBehaviorProfile]( + cython.cast(Game, game).game + ) + + def __repr__(self): + return f"LogitQREMixedBehaviorProfile(lam={self.lam},profile={self.profile})" + + def __len__(self): + return deref(self.thisptr).BehaviorProfileLength() + + def __getitem__(self, int i): + return deref(self.thisptr).getitem(i+1) + + @property + def game(self) -> Game: + """The game on which this mixed strategy profile is defined.""" + g = Game() + g.game = deref(self.thisptr).GetGame() + return g + + @property + def lam(self) -> double: + """The value of the precision parameter.""" + return deref(self.thisptr).GetLambda() + + @property + def log_like(self) -> double: + """The log-likelihood of the data.""" + return deref(self.thisptr).GetLogLike() + + @property + def profile(self) -> MixedBehaviorProfileDouble: + """The mixed strategy profile.""" + profile = MixedBehaviorProfileDouble() + profile.profile = ( + make_shared[c_MixedBehaviorProfileDouble](deref(self.thisptr).GetProfile()) + ) + return profile + + +def _logit_behavior_estimate(profile: MixedBehaviorProfileDouble, + first_step: float = .03, + max_accel: float = 1.1) -> LogitQREMixedBehaviorProfile: + """Estimate QRE corresponding to mixed behavior profile using + maximum likelihood along the principal branch. + """ + ret = LogitQREMixedBehaviorProfile(profile.game) + ret.thisptr = LogitBehaviorEstimateHelper(profile.profile, first_step, max_accel) + return ret + + +def logit_behavior_atlambda(game: Game, + lam: float, + first_step: float = .03, + max_accel: float = 1.1) -> LogitQREMixedBehaviorProfile: + """Compute the first QRE encountered along the principal branch of the extensive + game corresponding to lambda value `lam`. 
+ """ + ret = LogitQREMixedBehaviorProfile() + ret.thisptr = LogitBehaviorAtLambdaHelper(game.game, lam, first_step, max_accel) + return ret diff --git a/src/pygambit/nash.py b/src/pygambit/nash.py index cbd32daf3..a96da66d3 100644 --- a/src/pygambit/nash.py +++ b/src/pygambit/nash.py @@ -587,5 +587,6 @@ def logit_solve( ) -logit_atlambda = libgbt.logit_atlambda +logit_behavior_atlambda = libgbt.logit_behavior_atlambda +logit_strategy_atlambda = libgbt.logit_strategy_atlambda logit_principal_branch = libgbt.logit_principal_branch diff --git a/src/pygambit/qre.py b/src/pygambit/qre.py index df2f6661b..dea81fa5d 100644 --- a/src/pygambit/qre.py +++ b/src/pygambit/qre.py @@ -28,7 +28,9 @@ import numpy import scipy.optimize -from . import gambit, pctrace +import pygambit.gambit as libgbt + +from . import pctrace from .profiles import Solution @@ -299,8 +301,8 @@ class LogitQREMixedStrategyFitResult: See Also -------- - fit_fixedpoint - fit_empirical + fit_strategy_fixedpoint + fit_strategy_empirical """ def __init__(self, data, method, lam, profile, log_like): self._data = data @@ -315,7 +317,7 @@ def method(self) -> str: return self._method @property - def data(self) -> gambit.MixedStrategyProfileDouble: + def data(self) -> libgbt.MixedStrategyProfileDouble: """The empirical strategy frequencies used to estimate the QRE.""" return self._data @@ -325,7 +327,7 @@ def lam(self) -> float: return self._lam @property - def profile(self) -> gambit.MixedStrategyProfileDouble: + def profile(self) -> libgbt.MixedStrategyProfileDouble: """The mixed strategy profile corresponding to the QRE.""" return self._profile @@ -334,21 +336,23 @@ def log_like(self) -> float: """The log-likelihood of the data at the estimated QRE.""" return self._log_like - def __repr__(self): + def __repr__(self) -> str: return ( f"" ) -def fit_fixedpoint( - data: gambit.MixedStrategyProfileDouble +def fit_strategy_fixedpoint( + data: libgbt.MixedStrategyProfileDouble ) -> LogitQREMixedStrategyFitResult: """Use maximum likelihood estimation to find the logit quantal response equilibrium on the principal branch for a strategic game which best fits empirical frequencies of play. [1]_ - .. versionadded:: 16.1.0 + .. versionchanged:: 16.2.0 + + Renamed from `fit_fixedpoint` to disambiguate from agent version Parameters ---------- @@ -366,8 +370,8 @@ def fit_fixedpoint( See Also -------- - fit_empirical : Estimate QRE by approximation of the correspondence - using independent decision problems. + fit_strategy_empirical : Estimate QRE by approximation of the correspondence + using independent decision problems. References ---------- @@ -375,14 +379,14 @@ def fit_fixedpoint( as a structural model for estimation: The missing manual. SSRN working paper 4425515. """ - res = gambit.logit_estimate(data) + res = libgbt._logit_strategy_estimate(data) return LogitQREMixedStrategyFitResult( data, "fixedpoint", res.lam, res.profile, res.log_like ) -def fit_empirical( - data: gambit.MixedStrategyProfileDouble +def fit_strategy_empirical( + data: libgbt.MixedStrategyProfileDouble ) -> LogitQREMixedStrategyFitResult: """Use maximum likelihood estimation to estimate a quantal response equilibrium using the empirical payoff method. @@ -390,7 +394,9 @@ def fit_empirical( considerations of the QRE and approximates instead by a collection of independent decision problems. [1]_ - .. versionadded:: 16.1.0 + .. 
diff --git a/src/pygambit/qre.py b/src/pygambit/qre.py
index df2f6661b..dea81fa5d 100644
--- a/src/pygambit/qre.py
+++ b/src/pygambit/qre.py
@@ -28,7 +28,9 @@
 import numpy
 import scipy.optimize

-from . import gambit, pctrace
+import pygambit.gambit as libgbt
+
+from . import pctrace
 from .profiles import Solution


@@ -299,8 +301,8 @@ class LogitQREMixedStrategyFitResult:

     See Also
     --------
-    fit_fixedpoint
-    fit_empirical
+    fit_strategy_fixedpoint
+    fit_strategy_empirical
     """
     def __init__(self, data, method, lam, profile, log_like):
         self._data = data
@@ -315,7 +317,7 @@ def method(self) -> str:
         return self._method

     @property
-    def data(self) -> gambit.MixedStrategyProfileDouble:
+    def data(self) -> libgbt.MixedStrategyProfileDouble:
         """The empirical strategy frequencies used to estimate the QRE."""
         return self._data

@@ -325,7 +327,7 @@ def lam(self) -> float:
         return self._lam

     @property
-    def profile(self) -> gambit.MixedStrategyProfileDouble:
+    def profile(self) -> libgbt.MixedStrategyProfileDouble:
         """The mixed strategy profile corresponding to the QRE."""
         return self._profile

@@ -334,21 +336,23 @@ def log_like(self) -> float:
         """The log-likelihood of the data at the estimated QRE."""
         return self._log_like

-    def __repr__(self):
+    def __repr__(self) -> str:
         return (
             f"<LogitQREMixedStrategyFitResult(method={self.method},lam={self.lam})>"
         )


-def fit_fixedpoint(
-    data: gambit.MixedStrategyProfileDouble
+def fit_strategy_fixedpoint(
+    data: libgbt.MixedStrategyProfileDouble
 ) -> LogitQREMixedStrategyFitResult:
     """Use maximum likelihood estimation to find the logit quantal
     response equilibrium on the principal branch for a strategic game
     which best fits empirical frequencies of play. [1]_

-    .. versionadded:: 16.1.0
+    .. versionchanged:: 16.2.0
+
+        Renamed from `fit_fixedpoint` to disambiguate it from the agent version.

     Parameters
     ----------
@@ -366,8 +370,8 @@

     See Also
     --------
-    fit_empirical : Estimate QRE by approximation of the correspondence
-        using independent decision problems.
+    fit_strategy_empirical : Estimate QRE by approximation of the correspondence
+        using independent decision problems.

     References
     ----------
@@ -375,14 +379,14 @@
        as a structural model for estimation: The missing manual.
        SSRN working paper 4425515.
     """
-    res = gambit.logit_estimate(data)
+    res = libgbt._logit_strategy_estimate(data)
     return LogitQREMixedStrategyFitResult(
         data, "fixedpoint", res.lam, res.profile, res.log_like
     )


-def fit_empirical(
-    data: gambit.MixedStrategyProfileDouble
+def fit_strategy_empirical(
+    data: libgbt.MixedStrategyProfileDouble
 ) -> LogitQREMixedStrategyFitResult:
     """Use maximum likelihood estimation to estimate a quantal
     response equilibrium using the empirical payoff method.
@@ -391,17 +395,19 @@
     considerations of the QRE and approximates instead by a collection
     of independent decision problems. [1]_

-    .. versionadded:: 16.1.0
+    .. versionchanged:: 16.2.0
+
+        Renamed from `fit_empirical` to disambiguate it from the agent version.

     Returns
     -------
     LogitQREMixedStrategyFitResult
         The result of the estimation represented as a
         ``LogitQREMixedStrategyFitResult`` object.

     See Also
     --------
-    fit_fixedpoint : Estimate QRE precisely by computing the correspondence
+    fit_strategy_fixedpoint : Estimate QRE precisely by computing the correspondence

     References
     ----------
@@ -429,3 +435,89 @@ def log_like(lam: float) -> float:
     return LogitQREMixedStrategyFitResult(
         data, "empirical", res.x[0], do_logit(res.x[0]), -res.fun
     )
+
+
+class LogitQREMixedBehaviorFitResult:
+    """The result of fitting a QRE to a given probability distribution
+    over actions.
+
+    See Also
+    --------
+    fit_behavior_fixedpoint
+    """
+    def __init__(self, data, method, lam, profile, log_like):
+        self._data = data
+        self._method = method
+        self._lam = lam
+        self._profile = profile
+        self._log_like = log_like
+
+    @property
+    def method(self) -> str:
+        """The method used to estimate the QRE; either "fixedpoint" or "empirical"."""
+        return self._method
+
+    @property
+    def data(self) -> libgbt.MixedBehaviorProfileDouble:
+        """The empirical action frequencies used to estimate the QRE."""
+        return self._data
+
+    @property
+    def lam(self) -> float:
+        """The value of lambda corresponding to the QRE."""
+        return self._lam
+
+    @property
+    def profile(self) -> libgbt.MixedBehaviorProfileDouble:
+        """The mixed behavior profile corresponding to the QRE."""
+        return self._profile
+
+    @property
+    def log_like(self) -> float:
+        """The log-likelihood of the data at the estimated QRE."""
+        return self._log_like
+
+    def __repr__(self) -> str:
+        return (
+            f"<LogitQREMixedBehaviorFitResult(method={self.method},lam={self.lam})>"
+        )
+
+
+def fit_behavior_fixedpoint(
+    data: libgbt.MixedBehaviorProfileDouble
+) -> LogitQREMixedBehaviorFitResult:
+    """Use maximum likelihood estimation to find the logit quantal
+    response equilibrium on the principal branch for an extensive game
+    which best fits empirical frequencies of play. [1]_
+
+    .. versionadded:: 16.2.0
+
+    Parameters
+    ----------
+    data : MixedBehaviorProfileDouble
+        The empirical distribution of play to which to fit the QRE.
+        To obtain the correct resulting log-likelihood, these should
+        be expressed as total counts of observations of each action
+        rather than probabilities.
+
+    Returns
+    -------
+    LogitQREMixedBehaviorFitResult
+        The result of the estimation represented as a
+        ``LogitQREMixedBehaviorFitResult`` object.
+
+    See Also
+    --------
+    fit_strategy_fixedpoint : Estimate QRE using the strategic representation.
+
+    References
+    ----------
+    .. [1] Bland, J. R. and Turocy, T. L., 2023.  Quantal response equilibrium
+       as a structural model for estimation: The missing manual.
+       SSRN working paper 4425515.
+    """
+    res = libgbt._logit_behavior_estimate(data)
+    return LogitQREMixedBehaviorFitResult(
+        data, "fixedpoint", res.lam, res.profile, res.log_like
+    )
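A usage sketch for the new estimator defined above. The file name is hypothetical; for a meaningful fit the profile should hold observed action counts (totals, not probabilities), while the default profile is used here only to keep the sketch runnable:

    import pygambit as gbt

    g = gbt.Game.read_game("experiment.efg")  # hypothetical extensive game file
    data = g.mixed_behavior_profile()  # replace entries with observed action counts
    fit = gbt.qre.fit_behavior_fixedpoint(data)
    print(fit.lam, fit.log_like, fit.method)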
+ """ + res = libgbt._logit_behavior_estimate(data) + return LogitQREMixedBehaviorFitResult( + data, "fixedpoint", res.lam, res.profile, res.log_like + ) diff --git a/src/solvers/logit/efglogit.cc b/src/solvers/logit/efglogit.cc index 46b99b3d1..3daf967b6 100644 --- a/src/solvers/logit/efglogit.cc +++ b/src/solvers/logit/efglogit.cc @@ -287,7 +287,7 @@ void AgentQREPathTracer::CallbackFunction::operator()(const Vector &p_po for (int i = 1; i < p_point.Length(); i++) { profile[i] = exp(p_point[i]); } - m_profiles.push_back(LogitQREMixedBehaviorProfile(profile, p_point.back())); + m_profiles.push_back(LogitQREMixedBehaviorProfile(profile, p_point.back(), 0.0)); } //------------------------------------------------------------------------------ @@ -300,7 +300,7 @@ class AgentQREPathTracer::LambdaCriterion : public PathTracer::CriterionFunction double operator()(const Vector &p_point, const Vector &p_tangent) const override { - return p_point[p_point.Length()] - m_lambda; + return p_point.back() - m_lambda; } private: @@ -372,4 +372,160 @@ AgentQREPathTracer::SolveAtLambda(const LogitQREMixedBehaviorProfile &p_start, return func.GetProfiles().back(); } +//---------------------------------------------------------------------------- +// AgentQREEstimator: Criterion function +//---------------------------------------------------------------------------- + +namespace { +double LogLike(const Vector &p_frequencies, const Vector &p_point) +{ + double logL = 0.0; + for (int i = 1; i <= p_frequencies.Length(); i++) { + logL += p_frequencies[i] * log(p_point[i]); + } + return logL; +} + +} // end anonymous namespace + +class AgentQREEstimator::CriterionFunction : public PathTracer::CriterionFunction { +public: + explicit CriterionFunction(const Vector &p_frequencies) : m_frequencies(p_frequencies) {} + ~CriterionFunction() override = default; + + double operator()(const Vector &p_point, const Vector &p_tangent) const override + { + double diff_logL = 0.0; + for (int i = 1; i <= m_frequencies.Length(); i++) { + diff_logL += m_frequencies[i] * p_tangent[i]; + } + return diff_logL; + } + +private: + Vector m_frequencies; +}; + +//---------------------------------------------------------------------------- +// AgentQREEstimator: Callback function +//---------------------------------------------------------------------------- + +class AgentQREEstimator::CallbackFunction : public PathTracer::CallbackFunction { +public: + CallbackFunction(std::ostream &p_stream, const Game &p_game, const Vector &p_frequencies, + bool p_fullGraph, int p_decimals); + ~CallbackFunction() override = default; + + void operator()(const Vector &p_point, bool p_isTerminal) const override; + + LogitQREMixedBehaviorProfile GetMaximizer() const + { + return {m_bestProfile, m_bestLambda, m_maxlogL}; + } + void PrintMaximizer() const; + +private: + void PrintProfile(const MixedBehaviorProfile &, double) const; + + std::ostream &m_stream; + Game m_game; + const Vector &m_frequencies; + bool m_fullGraph; + int m_decimals; + mutable MixedBehaviorProfile m_bestProfile; + mutable double m_bestLambda{0.0}; + mutable double m_maxlogL; +}; + +AgentQREEstimator::CallbackFunction::CallbackFunction(std::ostream &p_stream, const Game &p_game, + const Vector &p_frequencies, + bool p_fullGraph, int p_decimals) + : m_stream(p_stream), m_game(p_game), m_frequencies(p_frequencies), m_fullGraph(p_fullGraph), + m_decimals(p_decimals), m_bestProfile(p_game), + m_maxlogL(LogLike(p_frequencies, static_cast &>(m_bestProfile))) +{ +} + +void 
+void AgentQREEstimator::CallbackFunction::PrintProfile(
+    const MixedBehaviorProfile<double> &p_profile, double p_logL) const
+{
+  for (size_t i = 1; i <= p_profile.BehaviorProfileLength(); i++) {
+    m_stream << "," << std::setprecision(m_decimals) << p_profile[i];
+  }
+  m_stream.setf(std::ios::fixed);
+  m_stream << "," << std::setprecision(m_decimals);
+  m_stream << p_logL;
+  m_stream.unsetf(std::ios::fixed);
+}
+
+void AgentQREEstimator::CallbackFunction::PrintMaximizer() const
+{
+  m_stream.setf(std::ios::fixed);
+  // By convention, we output lambda first
+  m_stream << std::setprecision(m_decimals) << m_bestLambda;
+  m_stream.unsetf(std::ios::fixed);
+  PrintProfile(m_bestProfile, m_maxlogL);
+  m_stream << std::endl;
+}
+
+void AgentQREEstimator::CallbackFunction::operator()(const Vector<double> &x,
+                                                     bool p_isTerminal) const
+{
+  m_stream.setf(std::ios::fixed);
+  // By convention, we output lambda first
+  if (!p_isTerminal) {
+    m_stream << std::setprecision(m_decimals) << x[x.Length()];
+  }
+  else {
+    m_stream << "NE";
+  }
+  m_stream.unsetf(std::ios::fixed);
+  MixedBehaviorProfile<double> profile(m_game);
+  for (int i = 1; i < x.Length(); i++) {
+    profile[i] = exp(x[i]);
+  }
+  double logL = LogLike(m_frequencies, static_cast<const Vector<double> &>(profile));
+  PrintProfile(profile, logL);
+  m_stream << std::endl;
+  if (logL > m_maxlogL) {
+    m_maxlogL = logL;
+    m_bestLambda = x[x.Length()];
+    m_bestProfile = profile;
+  }
+}
+
+//----------------------------------------------------------------------------
+//               AgentQREEstimator: Main driver routine
+//----------------------------------------------------------------------------
+
+LogitQREMixedBehaviorProfile
+AgentQREEstimator::Estimate(const LogitQREMixedBehaviorProfile &p_start,
+                            const MixedBehaviorProfile<double> &p_frequencies,
+                            std::ostream &p_stream, double p_maxLambda, double p_omega)
+{
+  if (p_start.GetGame() != p_frequencies.GetGame()) {
+    throw MismatchException();
+  }
+
+  Vector<double> x(p_start.BehaviorProfileLength() + 1);
+  for (int i = 1; i <= p_start.BehaviorProfileLength(); i++) {
+    x[i] = log(p_start[i]);
+  }
+  x.back() = p_start.GetLambda();
+
+  CallbackFunction callback(p_stream, p_start.GetGame(),
+                            static_cast<const Vector<double> &>(p_frequencies), m_fullGraph,
+                            m_decimals);
+  while (x.back() < p_maxLambda) {
+    TracePath(
+        EquationSystem(p_start.GetGame()), x, p_omega,
+        [p_maxLambda](const Vector<double> &p_point) {
+          return LambdaRangeTerminationFunction(p_point, 0, p_maxLambda);
+        },
+        callback, CriterionFunction(static_cast<const Vector<double> &>(p_frequencies)));
+  }
+  callback.PrintMaximizer();
+  return callback.GetMaximizer();
+}
+
 } // end namespace Gambit
diff --git a/src/solvers/logit/efglogit.h b/src/solvers/logit/efglogit.h
index 1f759b519..97022edda 100644
--- a/src/solvers/logit/efglogit.h
+++ b/src/solvers/logit/efglogit.h
@@ -31,12 +31,14 @@ namespace Gambit {
 class LogitQREMixedBehaviorProfile {
 public:
   explicit LogitQREMixedBehaviorProfile(const Game &p_game) : m_profile(p_game), m_lambda(0.0) {}
-  LogitQREMixedBehaviorProfile(const MixedBehaviorProfile<double> &p_profile, double p_lambda)
-    : m_profile(p_profile), m_lambda(p_lambda)
+  LogitQREMixedBehaviorProfile(const MixedBehaviorProfile<double> &p_profile, double p_lambda,
+                               double p_logLike)
+    : m_profile(p_profile), m_lambda(p_lambda), m_logLike(p_logLike)
   {
   }

   double GetLambda() const { return m_lambda; }
   const MixedBehaviorProfile<double> &GetProfile() const { return m_profile; }
+  double GetLogLike() const { return m_logLike; }
   Game GetGame() const { return m_profile.GetGame(); }
   size_t BehaviorProfileLength() const { return m_profile.BehaviorProfileLength(); }
@@ -45,6 +47,7 @@ class LogitQREMixedBehaviorProfile {
 private:
   const MixedBehaviorProfile<double> m_profile;
   double m_lambda;
+  double m_logLike{0.0};  // default-initialized: the Game-only constructor does not set it
 };

 class AgentQREPathTracer : public PathTracer {
@@ -65,7 +68,7 @@ class AgentQREPathTracer : public PathTracer {
   void SetDecimals(int p_decimals) { m_decimals = p_decimals; }
   int GetDecimals() const { return m_decimals; }

-private:
+protected:
   bool m_fullGraph;
   int m_decimals;

@@ -74,6 +77,33 @@ class AgentQREPathTracer : public PathTracer {
   class LambdaCriterion;
 };

+class AgentQREEstimator : public AgentQREPathTracer {
+public:
+  AgentQREEstimator() = default;
+  ~AgentQREEstimator() override = default;
+
+  LogitQREMixedBehaviorProfile Estimate(const LogitQREMixedBehaviorProfile &p_start,
+                                        const MixedBehaviorProfile<double> &p_frequencies,
+                                        std::ostream &p_logStream, double p_maxLambda,
+                                        double p_omega);
+
+protected:
+  class CriterionFunction;
+  class CallbackFunction;
+};
+
+inline LogitQREMixedBehaviorProfile
+LogitBehaviorEstimate(const MixedBehaviorProfile<double> &p_frequencies, double p_firstStep,
+                      double p_maxAccel)
+{
+  LogitQREMixedBehaviorProfile start(p_frequencies.GetGame());
+  AgentQREEstimator alg;
+  alg.SetMaxDecel(p_maxAccel);
+  alg.SetStepsize(p_firstStep);
+  std::ostringstream ostream;
+  return alg.Estimate(start, p_frequencies, ostream, 1000000.0, 1.0);
+}
+
 inline List<MixedBehaviorProfile<double>> LogitBehaviorSolve(const Game &p_game, double p_epsilon,
                                                              double p_firstStep, double p_maxAccel)
 {
diff --git a/src/solvers/logit/nfglogit.h b/src/solvers/logit/nfglogit.h
index 056d0e897..da704b28f 100644
--- a/src/solvers/logit/nfglogit.h
+++ b/src/solvers/logit/nfglogit.h
@@ -103,6 +103,18 @@ class StrategicQREEstimator : public StrategicQREPathTracer {
   class CallbackFunction;
 };

+inline LogitQREMixedStrategyProfile
+LogitStrategyEstimate(const MixedStrategyProfile<double> &p_frequencies, double p_firstStep,
+                      double p_maxAccel)
+{
+  LogitQREMixedStrategyProfile start(p_frequencies.GetGame());
+  StrategicQREEstimator alg;
+  alg.SetMaxDecel(p_maxAccel);
+  alg.SetStepsize(p_firstStep);
+  std::ostringstream ostream;
+  return alg.Estimate(start, p_frequencies, ostream, 1000000.0, 1.0);
+}
+
 inline List<MixedStrategyProfile<double>> LogitStrategySolve(const Game &p_game, double p_regret,
                                                              double p_firstStep, double p_maxAccel)
 {
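Both estimators maximize the criterion implemented by LogLike above. Since the traced variables are log-probabilities (profile[i] = exp(x[i])), for observed counts f_i and QRE probabilities pi_i(lambda) the criterion and the directional derivative computed by CriterionFunction along the path tangent t are

\[
  \log L(\lambda) = \sum_i f_i \log \pi_i(\lambda), \qquad
  \frac{d}{ds} \log L = \sum_i f_i \, t_i ,
\]

and the callback tracks the point with the highest log-likelihood encountered along the principal branch, which GetMaximizer then returns.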