Merge branch 'master' into feat/metrics-escape-tag-values
* master:
  feat(metric-stats): Report cardinality to metric stats (#3360)
  release: 0.8.56
  fix(perfscore): Adds span op tag to perf score totals (#3326)
  ref(profiles): Return retention_days as part of the Kafka message (#3362)
  ref(filter): Add GTmetrix to the list of web crawlers (#3363)
  fix: Fix kafka topic default (#3350)
  ref(normalization): Remove duplicated normalization (#3355)
  feat(feedback): Emit outcomes for user feedback events (#3026)
  feat(cardinality): Implement cardinality reporting (#3342)
jan-auer committed Apr 3, 2024
2 parents 41349a4 + ff241db commit 1635ee3
Showing 30 changed files with 854 additions and 245 deletions.
5 changes: 4 additions & 1 deletion CHANGELOG.md
@@ -18,13 +18,16 @@
- Apply rate limits to span metrics. ([#3255](https://github.com/getsentry/relay/pull/3255))
- Extract metrics from transaction spans. ([#3273](https://github.com/getsentry/relay/pull/3273), [#3324](https://github.com/getsentry/relay/pull/3324))
- Implement volume metric stats. ([#3281](https://github.com/getsentry/relay/pull/3281))
+- Implement cardinality metric stats. ([#3360](https://github.com/getsentry/relay/pull/3360))
- Scrub transactions before enforcing quotas. ([#3248](https://github.com/getsentry/relay/pull/3248))
- Implement metric name based cardinality limits. ([#3313](https://github.com/getsentry/relay/pull/3313))
-- Kafka topic config supports default topic names as keys. ([#3282](https://github.com/getsentry/relay/pull/3282))
+- Kafka topic config supports default topic names as keys. ([#3282](https://github.com/getsentry/relay/pull/3282), [#3350](https://github.com/getsentry/relay/pull/3350))
- Set all span tags on the transaction span. ([#3310](https://github.com/getsentry/relay/pull/3310))
+- Emit outcomes for user feedback events. ([#3026](https://github.com/getsentry/relay/pull/3026))
- Collect duration for all spans. ([#3322](https://github.com/getsentry/relay/pull/3322))
- Add `project_id` as part of the span Kafka message headers. ([#3320](https://github.com/getsentry/relay/pull/3320))
- Stop producing to sessions topic, the feature is now fully migrated to metrics. ([#3271](https://github.com/getsentry/relay/pull/3271))
+- Pass `retention_days` in the Kafka profile messages. ([#3362](https://github.com/getsentry/relay/pull/3362))
- Support and expose namespaces for metric rate limit propagation via the `x-sentry-rate-limits` header. ([#3347](https://github.com/getsentry/relay/pull/3347))

## 24.3.0
2 changes: 1 addition & 1 deletion Cargo.lock

Some generated files are not rendered by default.

10 changes: 10 additions & 0 deletions py/CHANGELOG.md
@@ -1,5 +1,15 @@
# Changelog

+## 0.8.56
+
+### Various fixes & improvements
+
+- feat(feedback): Emit outcomes for user feedback events (#3026) by @cmanallen
+- release: 0.8.55 (109ac110) by @getsentry-bot
+- feat(profiles): Add a new category to count profile chunks (#3303) by @phacops
+- release: 0.8.52 (65defad1) by @getsentry-bot
+- feat(profiles): Add a data category for continuous profiling (#3284) by @phacops
+
## 0.8.55

- Add a data category for profile chunks. [#3303](https://github.com/getsentry/relay/pull/3303))
1 change: 1 addition & 0 deletions py/sentry_relay/consts.py
@@ -59,6 +59,7 @@ def event_categories(cls):
DataCategory.ERROR,
DataCategory.TRANSACTION,
DataCategory.SECURITY,
+            DataCategory.USER_REPORT_V2,
]

@classmethod
2 changes: 1 addition & 1 deletion relay-cabi/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "relay-cabi"
version = "0.8.55"
version = "0.8.56"
authors = ["Sentry <oss@sentry.io>"]
homepage = "https://getsentry.github.io/relay/"
repository = "https://github.com/getsentry/relay"
44 changes: 27 additions & 17 deletions relay-cardinality/benches/redis_impl.rs
@@ -4,10 +4,14 @@ use std::{
};

use criterion::{BatchSize, BenchmarkId, Criterion};
-use relay_base_schema::{metrics::MetricNamespace, project::ProjectId};
+use relay_base_schema::{
+    metrics::{MetricName, MetricNamespace},
+    project::ProjectId,
+};
use relay_cardinality::{
-    limiter::{Entry, EntryId, Limiter, Rejections, Scoping},
-    CardinalityLimit, CardinalityScope, RedisSetLimiter, RedisSetLimiterOptions, SlidingWindow,
+    limiter::{Entry, EntryId, Limiter, Reporter, Scoping},
+    CardinalityLimit, CardinalityReport, CardinalityScope, RedisSetLimiter, RedisSetLimiterOptions,
+    SlidingWindow,
};
use relay_redis::{redis, RedisConfigOptions, RedisPool};

@@ -34,10 +38,12 @@ fn build_limiter(redis: RedisPool, reset_redis: bool) -> RedisSetLimiter {
)
}

-struct NoopRejections;
+struct NoopReporter;

-impl<'a> Rejections<'a> for NoopRejections {
+impl<'a> Reporter<'a> for NoopReporter {
    fn reject(&mut self, _limit_id: &'a CardinalityLimit, _entry_id: EntryId) {}
+
+    fn report_cardinality(&mut self, _limit: &'a CardinalityLimit, _report: CardinalityReport) {}
}

#[derive(Debug)]
@@ -47,14 +53,17 @@ struct Params {

rounds: usize,
num_hashes: usize,

+    name: MetricName,
}

impl Params {
-    fn new(limit: u64, rounds: usize, num_hashes: usize) -> Self {
+    fn new(limit: u32, rounds: usize, num_hashes: usize) -> Self {
Self {
limits: vec![CardinalityLimit {
id: "limit".to_owned(),
passive: false,
+                report: false,
window: SlidingWindow {
window_seconds: 3600,
granularity_seconds: 360,
@@ -69,24 +78,25 @@
},
rounds,
num_hashes,
+            name: MetricName::from("foo"),
}
}

#[inline(always)]
fn run<'a>(&self, limiter: &RedisSetLimiter, entries: impl IntoIterator<Item = Entry<'a>>) {
limiter
-            .check_cardinality_limits(self.scoping, &self.limits, entries, &mut NoopRejections)
+            .check_cardinality_limits(self.scoping, &self.limits, entries, &mut NoopReporter)
.unwrap();
}

/// Every round contains the same hashes.
-    fn rounds(&self) -> Vec<Vec<Entry<'static>>> {
+    fn rounds(&self) -> Vec<Vec<Entry<'_>>> {
let entries = (0..self.num_hashes)
.map(|i| {
Entry::new(
EntryId(i),
MetricNamespace::Custom,
"foo",
&self.name,
u32::MAX - (i as u32),
)
})
@@ -98,7 +108,7 @@ impl Params {
}

/// High cardinality, every round contains unique hashes.
-    fn rounds_unique(&self) -> Vec<Vec<Entry<'static>>> {
+    fn rounds_unique(&self) -> Vec<Vec<Entry<'_>>> {
let hash = AtomicU32::new(u32::MAX);

(0..self.rounds)
@@ -108,7 +118,7 @@
Entry::new(
EntryId(i),
MetricNamespace::Custom,
"foo",
&self.name,
hash.fetch_sub(1, Ordering::SeqCst),
)
})
@@ -118,19 +128,19 @@
}

/// Entry which is never generated by either [`Self::rounds`] or [`Self::rounds_unique`].
-    fn never_entry() -> Entry<'static> {
-        Entry::new(EntryId(usize::MAX), MetricNamespace::Custom, "foo", 0)
+    fn never_entry(&self) -> Entry<'_> {
+        Entry::new(EntryId(usize::MAX), MetricNamespace::Custom, &self.name, 0)
}

/// A vector of entries which is never generated by either [`Self::rounds`] or [`Self::rounds_unique`].
-    fn never_entries(&self) -> Vec<Entry<'static>> {
+    fn never_entries(&self) -> Vec<Entry<'_>> {
(0..self.limits[0].limit)
.map(|i| {
Entry::new(
EntryId(usize::MAX - i as usize),
MetricNamespace::Custom,
"foo",
i as u32,
&self.name,
i,
)
})
.collect::<Vec<_>>()
@@ -235,7 +245,7 @@ pub fn bench_cache_never_full(c: &mut Criterion) {
b.iter_batched(
|| {
let limiter = build_limiter(redis.clone(), true);
-                params.run(&limiter, vec![Params::never_entry()]);
+                params.run(&limiter, vec![params.never_entry()]);

// New limiter to reset cache.
let limiter = build_limiter(redis.clone(), false);
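For orientation on the benchmark change above: the `Rejections` trait is replaced by the broader `Reporter` trait, which is also told the observed cardinality via `report_cardinality`. A minimal sketch of a reporter that collects both rejections and reports follows; the `CollectingReporter` type and its field names are illustrative and not part of this commit.

use relay_cardinality::{
    limiter::{EntryId, Reporter},
    CardinalityLimit, CardinalityReport,
};

/// Illustrative reporter that records every rejection and cardinality report it receives.
#[derive(Default)]
struct CollectingReporter {
    rejected: Vec<EntryId>,
    reports: Vec<CardinalityReport>,
}

impl<'a> Reporter<'a> for CollectingReporter {
    fn reject(&mut self, _limit: &'a CardinalityLimit, entry_id: EntryId) {
        // Remember which entries were rejected by an enforced limit.
        self.rejected.push(entry_id);
    }

    fn report_cardinality(&mut self, _limit: &'a CardinalityLimit, report: CardinalityReport) {
        // Store the observed cardinality for limits that enable reporting.
        self.reports.push(report);
    }
}
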
12 changes: 11 additions & 1 deletion relay-cardinality/src/config.rs
@@ -9,17 +9,26 @@ use crate::SlidingWindow;
pub struct CardinalityLimit {
/// Unique identifier of the cardinality limit.
pub id: String,

/// Whether this is a passive limit.
///
/// Passive limits are tracked separately to normal limits
/// and are not enforced, but still evaluated.
#[serde(default, skip_serializing_if = "std::ops::Not::not")]
pub passive: bool,
+    /// If `true` additional reporting of cardinality is enabled.
+    ///
+    /// The cardinality limiter will keep track of every tracked limit
+    /// and record the current cardinality. The reported data is not per limit
+    /// but per scope. For example if [`Self::scope`] is set to [`CardinalityScope::Name`],
+    /// the current cardinality for each metric name is reported.
+    #[serde(default, skip_serializing_if = "std::ops::Not::not")]
+    pub report: bool,

/// The sliding window to enforce the cardinality limits in.
pub window: SlidingWindow,
/// The cardinality limit.
-    pub limit: u64,
+    pub limit: u32,

/// Scope which the limit applies to.
pub scope: CardinalityScope,
@@ -61,6 +70,7 @@ mod tests {
let limit = CardinalityLimit {
id: "some_id".to_string(),
passive: false,
+            report: false,
window: SlidingWindow {
window_seconds: 3600,
granularity_seconds: 200,
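To relate the new `report` flag to the fields above, a limit that opts into per-metric-name reporting might be constructed like the following sketch. Values are invented, and the trailing `namespace` field is assumed from the surrounding test code rather than shown in this hunk.

use relay_base_schema::metrics::MetricNamespace;
use relay_cardinality::{CardinalityLimit, CardinalityScope, SlidingWindow};

fn example_limit() -> CardinalityLimit {
    CardinalityLimit {
        // Hypothetical values; `report: true` and the `u32` limit are the point here.
        id: "custom-names".to_owned(),
        passive: false,
        report: true, // also record the observed cardinality
        window: SlidingWindow {
            window_seconds: 3600,
            granularity_seconds: 360,
        },
        limit: 10_000,
        // With a `Name` scope, cardinality is reported per metric name.
        scope: CardinalityScope::Name,
        // Assumed optional namespace filter, mirroring the existing tests.
        namespace: Some(MetricNamespace::Custom),
    }
}
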
2 changes: 1 addition & 1 deletion relay-cardinality/src/lib.rs
@@ -16,7 +16,7 @@ mod window;

pub use self::config::*;
pub use self::error::*;
-pub use self::limiter::{CardinalityItem, CardinalityLimits, Scoping};
+pub use self::limiter::{CardinalityItem, CardinalityLimits, CardinalityReport, Scoping};
#[cfg(feature = "redis")]
pub use self::redis::{RedisSetLimiter, RedisSetLimiterOptions};
pub use self::window::SlidingWindow;
