SERVER-77901 query plancache replan improvement #1553

Open · wants to merge 4 commits into base: v5.0
51 changes: 41 additions & 10 deletions src/mongo/db/exec/cached_plan.cpp
@@ -69,7 +69,7 @@ CachedPlanStage::CachedPlanStage(ExpressionContext* expCtx,
_ws(ws),
_canonicalQuery(cq),
_plannerParams(params),
_decisionWorks(decisionWorks) {
_decisionWorks{decisionWorks} {
_children.emplace_back(std::move(root));
}

@@ -90,7 +90,29 @@ Status CachedPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
// If we work this many times during the trial period, then we will replan the
// query from scratch.
size_t maxWorksBeforeReplan =
static_cast<size_t>(internalQueryCacheEvictionRatio * _decisionWorks);
static_cast<size_t>(internalQueryCacheEvictionRatio * getDecisionWorks());

// The replan work limit cannot exceed the number of works derived from the configured
// fraction of the collection size. In extreme cases, without this restriction, the limit
// could exceed the total collection size, making the replan check ineffective and possibly
// causing the query to keep using the wrong index.
size_t numUpperLimitWorks = trial_period::getTrialPeriodMaxWorks(opCtx(), collection());
if (maxWorksBeforeReplan > numUpperLimitWorks) {
maxWorksBeforeReplan = numUpperLimitWorks;
}

// Sometimes _decisionWorks is greater than numUpperLimitWorks, and may even exceed the
// collection size. For example, a large amount of data may have been deleted while the
// query plan was cached.
//
// Force-adjust the cache entry's works. Without this adjustment, replanning would lose
// effectiveness.
if (getDecisionWorks() > numUpperLimitWorks) {
LOGV2_DEBUG(20578,
1,
"Force adjust the cache entry works",
"oldWorks"_attr = getDecisionWorks(),
"newWorks"_attr = numUpperLimitWorks);
_decisionWorks.store(numUpperLimitWorks);
}

// The trial period ends without replanning if the cached plan produces this many results.
size_t numResults = trial_period::getTrialPeriodNumToReturn(*_canonicalQuery);
@@ -167,18 +189,27 @@ Status CachedPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
1,
"Evicting cache entry and replanning query",
"maxWorksBeforeReplan"_attr = maxWorksBeforeReplan,
"decisionWorks"_attr = _decisionWorks,
"decisionWorks"_attr = getDecisionWorks(),
"query"_attr = redact(_canonicalQuery->toStringShort()),
"planSummary"_attr = explainer->getPlanSummary());

const bool shouldCache = true;
return replan(
yieldPolicy,
shouldCache,
str::stream()
<< "cached plan was less efficient than expected: expected trial execution to take "
<< _decisionWorks << " works but it took at least " << maxWorksBeforeReplan
<< " works");
if (getDecisionWorks() < maxWorksBeforeReplan) {
return replan(
yieldPolicy,
shouldCache,
str::stream()
<< "cached plan was less efficient than expected: expected trial execution to take "
<< getDecisionWorks() << " works but it took at least " << maxWorksBeforeReplan
<< " works");
} else {
return replan(
yieldPolicy,
shouldCache,
str::stream()
<< "cached plan may be less efficient than expected: expected decisionWorks is "
<< getDecisionWorks() << ", it is same to the maxWorksBeforeReplan");
}
}

Status CachedPlanStage::tryYield(PlanYieldPolicy* yieldPolicy) {
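The change above caps both the replan threshold and the cached decisionWorks at an upper bound derived from the collection size. Below is a minimal standalone sketch of that capping logic; it is illustrative only, and the helper computeTrialLimits and its parameters are hypothetical stand-ins for the values computed inside pickBestPlan():

#include <algorithm>
#include <cstddef>

struct TrialPeriodLimits {
    std::size_t maxWorksBeforeReplan;   // work budget for the cached plan's trial run
    std::size_t adjustedDecisionWorks;  // decisionWorks after clamping to the collection-based bound
};

// Hypothetical helper mirroring the capping logic added to pickBestPlan() above.
TrialPeriodLimits computeTrialLimits(double evictionRatio,            // internalQueryCacheEvictionRatio
                                     std::size_t decisionWorks,       // works recorded when the plan was cached
                                     std::size_t collectionMaxWorks)  // collection-size-based upper bound
{
    std::size_t maxWorksBeforeReplan =
        std::min(static_cast<std::size_t>(evictionRatio * decisionWorks), collectionMaxWorks);
    std::size_t adjustedDecisionWorks = std::min(decisionWorks, collectionMaxWorks);
    return {maxWorksBeforeReplan, adjustedDecisionWorks};
}

For instance, with an eviction ratio of 10, a plan cached at decisionWorks = 100 would normally get a 1000-work trial budget, but if the collection has shrunk so that collectionMaxWorks is 300, both values are clamped to 300 and the trial can still trigger a replan.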
7 changes: 6 additions & 1 deletion src/mongo/db/exec/cached_plan.h
@@ -39,6 +39,7 @@
#include "mongo/db/query/query_planner_params.h"
#include "mongo/db/query/query_solution.h"
#include "mongo/db/record_id.h"
#include "mongo/platform/atomic_word.h"

namespace mongo {

@@ -72,6 +73,10 @@ class CachedPlanStage final : public RequiresAllIndicesStage {
return STAGE_CACHED_PLAN;
}

size_t getDecisionWorks() const {
return _decisionWorks.load();
}

std::unique_ptr<PlanStageStats> getStats() final;

const SpecificStats* getSpecificStats() const final;
@@ -123,7 +128,7 @@ class CachedPlanStage final : public RequiresAllIndicesStage {

// The number of work cycles taken to decide on a winning plan when the plan was first
// cached.
size_t _decisionWorks;
AtomicWord<size_t> _decisionWorks{0};

// If we fall back to re-planning the query, and there is just one resulting query solution,
// that solution is owned here.
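The header change above swaps the plain size_t member for AtomicWord<size_t> (from mongo/platform/atomic_word.h), so _decisionWorks can be overwritten by pickBestPlan()'s store() while getDecisionWorks() reads it with load(). A rough standalone sketch of the same pattern, using std::atomic as a stand-in for AtomicWord and a hypothetical class name:

#include <atomic>
#include <cstddef>

// Hypothetical illustration of the member change in cached_plan.h: an atomic word lets a
// const getter read the value while pickBestPlan() may lower it with a plain store().
class DecisionWorksExample {
public:
    explicit DecisionWorksExample(std::size_t decisionWorks) : _decisionWorks{decisionWorks} {}

    std::size_t getDecisionWorks() const {
        return _decisionWorks.load();
    }

    void capDecisionWorks(std::size_t upperLimit) {
        if (getDecisionWorks() > upperLimit) {
            _decisionWorks.store(upperLimit);
        }
    }

private:
    std::atomic<std::size_t> _decisionWorks{0};
};

The getter-plus-store pattern keeps existing readers unchanged while allowing the trial-period code to shrink the recorded works in place, which is why the member initializer in the constructor switches to brace initialization.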