Skip to content

Commit

Permalink
Merge pull request #62453 from ClickHouse/lower-contention-on-stacktr…
Browse files Browse the repository at this point in the history
…ace-cache

Use shared mutex for global stacktrace cache
  • Loading branch information
serxa committed Apr 10, 2024
2 parents ffc4046 + 5d576bb commit 027c8a8
Showing 1 changed file with 23 additions and 44 deletions.
67 changes: 23 additions & 44 deletions src/Common/StackTrace.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
#include <Common/Dwarf.h>
#include <Common/Elf.h>
#include <Common/MemorySanitizer.h>
#include <Common/SharedMutex.h>
#include <Common/SymbolIndex.h>

#include <IO/WriteBufferFromString.h>
Expand Down Expand Up @@ -480,10 +481,8 @@ void StackTrace::toStringEveryLine(void ** frame_pointers_raw, size_t offset, si

struct CacheEntry
{
    /// Per-entry lock: serializes the (slow) symbolization of this one stack trace
    /// without blocking readers/writers of other cache entries or of the cache map itself.
    std::mutex mutex;
    /// Empty until the first thread finishes rendering the trace; immutable afterwards.
    std::optional<String> stacktrace_string;
};

/// Entries are shared so they can outlive the cache-map lock while being filled in.
using CacheEntryPtr = std::shared_ptr<CacheEntry>;
Expand All @@ -496,67 +495,47 @@ static StackTraceCache & cacheInstance()
return cache;
}

/// Guards the global stacktrace cache map only (not the per-entry strings):
/// shared (read) lock for lookups, exclusive (write) lock for insertions.
static DB::SharedMutex stacktrace_cache_mutex;

/// Returns the textual rendering of a stack trace, memoized in a process-global cache.
/// Lock order: global map lock (shared or exclusive, briefly) -> per-entry mutex (possibly slow).
String toStringCached(const StackTrace::FramePointers & pointers, size_t offset, size_t size)
{
    /// Non-owning key: avoids copying the frame pointer array for a mere lookup.
    const StackTraceRefTriple key{pointers, offset, size};

    /// Calculation of stack trace text is extremely slow.
    /// We use cache because otherwise the server could be overloaded by trash queries.
    /// Note that this cache can grow unconditionally, but practically it should be small.
    StackTraceCache & cache = cacheInstance();
    CacheEntryPtr cache_entry;

    // Optimistic try for cache hit to avoid any contention whatsoever, should be the main hot code route
    {
        std::shared_lock read_lock{stacktrace_cache_mutex};
        if (auto it = cache.find(key); it != cache.end())
            cache_entry = it->second;
    }

    // Create a new entry in case of a cache miss
    if (!cache_entry)
    {
        std::unique_lock write_lock{stacktrace_cache_mutex};

        // We should recheck because `shared_lock` was released before we acquired `write_lock`
        if (auto it = cache.find(key); it != cache.end())
            cache_entry = it->second; // Another thread managed to create this entry before us
        else
            // Owning key (StackTraceTriple) is constructed only when we actually insert.
            cache_entry = cache.emplace(StackTraceTriple{pointers, offset, size}, std::make_shared<CacheEntry>()).first->second;
    }

    // Do not hold `stacktrace_cache_mutex` while running possibly slow calculation of stack trace text
    std::scoped_lock lock(cache_entry->mutex);
    if (!cache_entry->stacktrace_string.has_value())
    {
        DB::WriteBufferFromOwnString out;
        toStringEveryLineImpl(false, key, [&](std::string_view str) { out << str << '\n'; });
        cache_entry->stacktrace_string = out.str();
    }

    return *cache_entry->stacktrace_string;
}

std::string StackTrace::toString() const
Expand Down

0 comments on commit 027c8a8

Please sign in to comment.