From f17eeeda02afa5d588fa6e81c63a7705c13816c2 Mon Sep 17 00:00:00 2001 From: Yuanli Han Date: Mon, 18 Mar 2024 15:23:59 +0800 Subject: [PATCH 1/8] Add Apache Druid receiver --- .github/ISSUE_TEMPLATE/bug_report.yaml | 1 + .github/ISSUE_TEMPLATE/feature_request.yaml | 1 + .github/ISSUE_TEMPLATE/other.yaml | 1 + cmd/otelcontribcol/builder-config.yaml | 3 +- cmd/otelcontribcol/components.go | 2 + cmd/otelcontribcol/go.mod | 3 + receiver/apachedruidreceiver/Makefile | 1 + receiver/apachedruidreceiver/README.md | 44 + receiver/apachedruidreceiver/config.go | 19 + receiver/apachedruidreceiver/doc.go | 6 + receiver/apachedruidreceiver/documentation.md | 3217 ++++ receiver/apachedruidreceiver/factory.go | 37 + .../generated_component_test.go | 68 + receiver/apachedruidreceiver/go.mod | 70 + receiver/apachedruidreceiver/go.sum | 154 + .../internal/metadata/generated_config.go | 1066 ++ .../metadata/generated_config_test.go | 606 + .../internal/metadata/generated_metrics.go | 14859 ++++++++++++++++ .../metadata/generated_metrics_test.go | 5541 ++++++ .../internal/metadata/generated_resource.go | 50 + .../metadata/generated_resource_test.go | 52 + .../internal/metadata/generated_status.go | 26 + .../internal/metadata/testdata/config.yaml | 999 ++ receiver/apachedruidreceiver/metadata.yaml | 2541 +++ receiver/apachedruidreceiver/receiver.go | 27 + receiver/apachedruidreceiver/receiver_test.go | 55 + versions.yaml | 1 + 27 files changed, 29449 insertions(+), 1 deletion(-) create mode 100644 receiver/apachedruidreceiver/Makefile create mode 100644 receiver/apachedruidreceiver/README.md create mode 100644 receiver/apachedruidreceiver/config.go create mode 100644 receiver/apachedruidreceiver/doc.go create mode 100644 receiver/apachedruidreceiver/documentation.md create mode 100644 receiver/apachedruidreceiver/factory.go create mode 100644 receiver/apachedruidreceiver/generated_component_test.go create mode 100644 receiver/apachedruidreceiver/go.mod create mode 100644 receiver/apachedruidreceiver/go.sum create mode 100644 receiver/apachedruidreceiver/internal/metadata/generated_config.go create mode 100644 receiver/apachedruidreceiver/internal/metadata/generated_config_test.go create mode 100644 receiver/apachedruidreceiver/internal/metadata/generated_metrics.go create mode 100644 receiver/apachedruidreceiver/internal/metadata/generated_metrics_test.go create mode 100644 receiver/apachedruidreceiver/internal/metadata/generated_resource.go create mode 100644 receiver/apachedruidreceiver/internal/metadata/generated_resource_test.go create mode 100644 receiver/apachedruidreceiver/internal/metadata/generated_status.go create mode 100644 receiver/apachedruidreceiver/internal/metadata/testdata/config.yaml create mode 100644 receiver/apachedruidreceiver/metadata.yaml create mode 100644 receiver/apachedruidreceiver/receiver.go create mode 100644 receiver/apachedruidreceiver/receiver_test.go diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml index 649ececf7d880..f09dd2264a445 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yaml +++ b/.github/ISSUE_TEMPLATE/bug_report.yaml @@ -177,6 +177,7 @@ body: - receiver/activedirectoryds - receiver/aerospike - receiver/apache + - receiver/apachedruid - receiver/apachespark - receiver/awscloudwatch - receiver/awscloudwatchmetrics diff --git a/.github/ISSUE_TEMPLATE/feature_request.yaml b/.github/ISSUE_TEMPLATE/feature_request.yaml index f1df2945983c7..139ef2e8c6bdf 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yaml +++ 
b/.github/ISSUE_TEMPLATE/feature_request.yaml @@ -171,6 +171,7 @@ body: - receiver/activedirectoryds - receiver/aerospike - receiver/apache + - receiver/apachedruid - receiver/apachespark - receiver/awscloudwatch - receiver/awscloudwatchmetrics diff --git a/.github/ISSUE_TEMPLATE/other.yaml b/.github/ISSUE_TEMPLATE/other.yaml index b68022d3aaca2..9da56fc9589e6 100644 --- a/.github/ISSUE_TEMPLATE/other.yaml +++ b/.github/ISSUE_TEMPLATE/other.yaml @@ -171,6 +171,7 @@ body: - receiver/activedirectoryds - receiver/aerospike - receiver/apache + - receiver/apachedruid - receiver/apachespark - receiver/awscloudwatch - receiver/awscloudwatchmetrics diff --git a/cmd/otelcontribcol/builder-config.yaml b/cmd/otelcontribcol/builder-config.yaml index a9cfdfac6b206..655110cc78ac6 100644 --- a/cmd/otelcontribcol/builder-config.yaml +++ b/cmd/otelcontribcol/builder-config.yaml @@ -118,6 +118,7 @@ receivers: - gomod: go.opentelemetry.io/collector/receiver/otlpreceiver v0.96.1-0.20240306115632-b2693620eff6 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/activedirectorydsreceiver v0.96.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/aerospikereceiver v0.96.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachedruidreceiver v0.96.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachereceiver v0.96.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachesparkreceiver v0.96.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscloudwatchreceiver v0.96.0 @@ -264,6 +265,7 @@ replaces: - github.com/open-telemetry/opentelemetry-collector-contrib/extension/headerssetterextension => ../../extension/headerssetterextension - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/googlemanagedprometheusexporter => ../../exporter/googlemanagedprometheusexporter - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/aerospikereceiver => ../../receiver/aerospikereceiver + - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachedruidreceiver => ../../receiver/apachedruidreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor => ../../processor/cumulativetodeltaprocessor - github.com/open-telemetry/opentelemetry-collector-contrib/processor/intervalprocessor => ../../processor/intervalprocessor - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sapmreceiver => ../../receiver/sapmreceiver @@ -444,4 +446,3 @@ replaces: - github.com/open-telemetry/opentelemetry-collector-contrib/extension/solarwindsapmsettingsextension => ../../extension/solarwindsapmsettingsextension - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/namedpipereceiver => ../../receiver/namedpipereceiver - github.com/open-telemetry/opentelemetry-collector-contrib/internal/sqlquery => ../../internal/sqlquery - diff --git a/cmd/otelcontribcol/components.go b/cmd/otelcontribcol/components.go index 7c91bf81f8598..11571803e2491 100644 --- a/cmd/otelcontribcol/components.go +++ b/cmd/otelcontribcol/components.go @@ -118,6 +118,7 @@ import ( transformprocessor "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor" activedirectorydsreceiver "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/activedirectorydsreceiver" aerospikereceiver "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/aerospikereceiver" 
+ apachedruidreceiver "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachedruidreceiver" apachereceiver "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachereceiver" apachesparkreceiver "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachesparkreceiver" awscloudwatchreceiver "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscloudwatchreceiver" @@ -247,6 +248,7 @@ func components() (otelcol.Factories, error) { otlpreceiver.NewFactory(), activedirectorydsreceiver.NewFactory(), aerospikereceiver.NewFactory(), + apachedruidreceiver.NewFactory(), apachereceiver.NewFactory(), apachesparkreceiver.NewFactory(), awscloudwatchreceiver.NewFactory(), diff --git a/cmd/otelcontribcol/go.mod b/cmd/otelcontribcol/go.mod index fc8168bb5f315..29e0de3d73da3 100644 --- a/cmd/otelcontribcol/go.mod +++ b/cmd/otelcontribcol/go.mod @@ -106,6 +106,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.96.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/activedirectorydsreceiver v0.96.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/aerospikereceiver v0.96.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachedruidreceiver v0.96.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachereceiver v0.96.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachesparkreceiver v0.96.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscloudwatchreceiver v0.96.0 @@ -835,6 +836,8 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/googl replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/aerospikereceiver => ../../receiver/aerospikereceiver +replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachedruidreceiver => ../../receiver/apachedruidreceiver + replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor => ../../processor/cumulativetodeltaprocessor replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/intervalprocessor => ../../processor/intervalprocessor diff --git a/receiver/apachedruidreceiver/Makefile b/receiver/apachedruidreceiver/Makefile new file mode 100644 index 0000000000000..c1496226e5905 --- /dev/null +++ b/receiver/apachedruidreceiver/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common \ No newline at end of file diff --git a/receiver/apachedruidreceiver/README.md b/receiver/apachedruidreceiver/README.md new file mode 100644 index 0000000000000..ca638b75263fa --- /dev/null +++ b/receiver/apachedruidreceiver/README.md @@ -0,0 +1,44 @@ +# Apache Druid Receiver + + +| Status | | +| ------------- |-----------| +| Stability | [development]: metrics, logs | +| Distributions | [contrib], [observiq], [sumo] | +| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Areceiver%2Fapachedruid%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Areceiver%2Fapachedruid) [![Closed 
issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Areceiver%2Fapachedruid%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Areceiver%2Fapachedruid) | +| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@yuanlihan](https://www.github.com/yuanlihan) | + +[development]: https://github.com/open-telemetry/opentelemetry-collector#development +[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib +[observiq]: https://github.com/observIQ/observiq-otel-collector +[sumo]: https://github.com/SumoLogic/sumologic-otel-collector + + +This receiver accepts [metrics](https://druid.apache.org/docs/latest/operations/metrics) data from the [HTTP Emitter](https://druid.apache.org/docs/latest/configuration/#http-emitter-module) of Apache Druid. + +## Prerequisites + +The Apache Druid cluster needs to be configured to enable metrics; see [Enabling Metrics](https://druid.apache.org/docs/latest/configuration/#enabling-metrics) and the [HTTP Emitter Module](https://druid.apache.org/docs/latest/configuration/#http-emitter-module). + +## Configuration + +The following configuration options are supported: + +- `endpoint` (default = `0.0.0.0:9000`) The HTTP service endpoint on which this receiver accepts data. +- `metrics_path` (default = `/services/collector/metrics`) The path accepting Apache Druid metrics. +- `logs_path` (default = `/services/collector/logs`) The path accepting Apache Druid logs. +- `cluster_name` (default = `default`) The name of the Druid cluster. Note that the [HTTP Emitter](https://druid.apache.org/docs/latest/configuration/#http-emitter-module) of Apache Druid `28.0.0` doesn't include cluster name information in the emitted metrics. + +The full list of settings exposed for this receiver is documented in [config.go](config.go). + +Example: + +```yaml +receivers: + apachedruid: + endpoint: 0.0.0.0:9000 +``` + +## Metrics + +Details about the metrics produced by this receiver can be found in [metadata.yaml](./metadata.yaml). diff --git a/receiver/apachedruidreceiver/config.go b/receiver/apachedruidreceiver/config.go new file mode 100644 index 0000000000000..5f5acec4f93c7 --- /dev/null +++ b/receiver/apachedruidreceiver/config.go @@ -0,0 +1,19 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package apachedruidreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachedruidreceiver" + +import ( + "go.opentelemetry.io/collector/config/confighttp" +) + +// Config defines configuration for the Apache Druid receiver.
+type Config struct { + confighttp.HTTPServerSettings `mapstructure:",squash"` + + // MetricsPath is the path for metrics data collection, default is '/services/collector/metrics' + MetricsPath string `mapstructure:"metrics_path"` + + // The name of the Druid cluster + ClusterName string `mapstructure:"cluster_name"` +} diff --git a/receiver/apachedruidreceiver/doc.go b/receiver/apachedruidreceiver/doc.go new file mode 100644 index 0000000000000..242d91a041219 --- /dev/null +++ b/receiver/apachedruidreceiver/doc.go @@ -0,0 +1,6 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:generate mdatagen metadata.yaml + +package apachedruidreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachedruidreceiver" diff --git a/receiver/apachedruidreceiver/documentation.md b/receiver/apachedruidreceiver/documentation.md new file mode 100644 index 0000000000000..56bd3de5e0c76 --- /dev/null +++ b/receiver/apachedruidreceiver/documentation.md @@ -0,0 +1,3217 @@ +[comment]: <> (Code generated by mdatagen. DO NOT EDIT.) + +# apachedruid + +## Default Metrics + +The following metrics are emitted by default. Each of them can be disabled by applying the following configuration: + +```yaml +metrics: + <metric_name>: + enabled: false +``` + +### apachedruid.compact.segment_analyzer.fetch_and_process_millis + +Time taken to fetch and process segments to infer the schema for the compaction task to run. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The type of task. | Any Str | +| data_source | The data source of compaction task. | Any Str | +| group_id | The group id of compaction task. | Any Str | +| tags | The tags of the compaction task. | Any Str | +| task_id | The task id of compaction task. | Any Str | + +### apachedruid.compact.task.count + +Number of tasks issued in the auto compaction run. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {tasks} | Sum | Int | Delta | true | + +### apachedruid.compact_task.available_slot.count + +Number of available task slots that can be used for auto compaction tasks in the auto compaction run. This is the max number of task slots minus any currently running compaction tasks. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {slots} | Gauge | Int | + +### apachedruid.compact_task.max_slot.count + +Maximum number of task slots available for auto compaction tasks in the auto compaction run. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {slots} | Gauge | Int | + +### apachedruid.coordinator.global.time + +Approximate runtime of a full coordination cycle in milliseconds. The `dutyGroup` dimension indicates what type of coordination this run was. For example, Historical Management or Indexing. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| duty_group | The name of the duty group. | Any Str | + +### apachedruid.coordinator.time + +Approximate Coordinator duty runtime in milliseconds.
+ +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| duty | The name of coordinator duty task. | Any Str | + +### apachedruid.ingest.bytes.received + +Number of bytes received by the `EventReceiverFirehose`. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The type of ingestion task. | Any Str | +| task_id | The id of the task. | Any Str | +| data_source | The data source of ingestion task. | Any Str | +| service_name | The name of ingestion service. | Any Str | + +### apachedruid.ingest.count + +Count of `1` every time an ingestion job runs (includes compaction jobs). Aggregate using dimensions. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The type of ingestion task. | Any Str | +| data_source | The data source of ingestion task. | Any Str | +| group_id | The ingestion group id. | Any Str | +| tags | The names of tags. | Any Str | +| task_id | The id of the task. | Any Str | +| task_ingestion_mode | The mode of ingestion task. | Any Str | + +### apachedruid.ingest.events.buffered + +Number of events queued in the `EventReceiverFirehose` buffer. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {events} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The type of ingestion task. | Any Str | +| data_source | The data source of ingestion task. | Any Str | +| service_name | The name of ingestion service. | Any Str | +| buffer_capacity | The capacity of ingestion buffer. | Any Str | +| task_id | The id of the task. | Any Str | + +### apachedruid.ingest.events.duplicate + +Number of events rejected because the events are duplicated. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {events} | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The type of ingestion task. | Any Str | +| data_source | The data source of ingestion task. | Any Str | +| group_id | The ingestion group id. | Any Str | +| tags | The names of tags. | Any Str | +| task_id | The id of the task. | Any Str | + +### apachedruid.ingest.events.message_gap + +Time gap in milliseconds between the latest ingested event timestamp and the current system timestamp of metrics emission. If the value is increasing but lag is low, Druid may not be receiving new data. This metric is reset as new tasks spawn up. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The type of ingestion task. | Any Str | +| data_source | The data source of ingestion task. | Any Str | +| group_id | The ingestion group id. | Any Str | +| tags | The names of tags. | Any Str | +| task_id | The id of the task. | Any Str | + +### apachedruid.ingest.events.processed + +Number of events processed per emission period. 
+ +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {events} | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The type of ingestion task. | Any Str | +| data_source | The data source of ingestion task. | Any Str | +| group_id | The ingestion group id. | Any Str | +| tags | The names of tags. | Any Str | +| task_id | The id of the task. | Any Str | + +### apachedruid.ingest.events.processed_with_error + +Number of events processed with some partial errors per emission period. Events processed with partial errors are counted towards both this metric and `ingest/events/processed`. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {events} | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The type of ingestion task. | Any Str | +| data_source | The data source of ingestion task. | Any Str | +| group_id | The ingestion group id. | Any Str | +| tags | The names of tags. | Any Str | +| task_id | The id of the task. | Any Str | + +### apachedruid.ingest.events.thrown_away + +Number of events rejected because they are null, or filtered by `transformSpec`, or outside one of `lateMessageRejectionPeriod`, `earlyMessageRejectionPeriod`, or `windowPeriod`. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {events} | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The type of ingestion task. | Any Str | +| data_source | The data source of ingestion task. | Any Str | +| group_id | The ingestion group id. | Any Str | +| tags | The names of tags. | Any Str | +| task_id | The id of the task. | Any Str | + +### apachedruid.ingest.events.unparseable + +Number of events rejected because the events are unparseable. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {events} | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The type of ingestion task. | Any Str | +| data_source | The data source of ingestion task. | Any Str | +| group_id | The ingestion group id. | Any Str | +| tags | The names of tags. | Any Str | +| task_id | The id of the task. | Any Str | + +### apachedruid.ingest.handoff.count + +Number of handoffs that happened. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {handoffs} | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The type of ingestion task. | Any Str | +| data_source | The data source of ingestion task. | Any Str | +| group_id | The ingestion group id. | Any Str | +| tags | The names of tags. | Any Str | +| task_id | The id of the task. | Any Str | + +### apachedruid.ingest.handoff.failed + +Number of handoffs that failed. 
+ +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {handoffs} | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The type of ingestion task. | Any Str | +| data_source | The data source of ingestion task. | Any Str | +| group_id | The ingestion group id. | Any Str | +| tags | The names of tags. | Any Str | +| task_id | The id of the task. | Any Str | + +### apachedruid.ingest.handoff.time + +Total number of milliseconds taken to handoff a set of segments. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The type of ingestion task. | Any Str | +| data_source | The data source of ingestion task. | Any Str | +| group_id | The ingestion group id. | Any Str | +| tags | The names of tags. | Any Str | +| task_id | The id of the task. | Any Str | + +### apachedruid.ingest.input.bytes + +Number of bytes read from input sources, after decompression but prior to parsing. This covers all data read, including data that does not end up being fully processed and ingested. For example, this includes data that ends up being rejected for being unparseable or filtered out. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The type of ingestion task. | Any Str | +| data_source | The data source of ingestion task. | Any Str | +| group_id | The ingestion group id. | Any Str | +| tags | The names of tags. | Any Str | +| task_id | The id of the task. | Any Str | + +### apachedruid.ingest.kafka.avg_lag + +Average lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers across all partitions. Minimum emission period for this metric is a minute. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| tags | The names of tags. | Any Str | +| stream | The name of stream to ingest. | Any Str | +| data_source | The data source of ingestion task. | Any Str | + +### apachedruid.ingest.kafka.lag + +Total lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers across all partitions. Minimum emission period for this metric is a minute. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| tags | The names of tags. | Any Str | +| stream | The name of stream to ingest. | Any Str | +| data_source | The data source of ingestion task. | Any Str | + +### apachedruid.ingest.kafka.max_lag + +Max lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers across all partitions. Minimum emission period for this metric is a minute. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| tags | The names of tags. | Any Str | +| stream | The name of stream to ingest. 
| Any Str | +| data_source | The data source of ingestion task. | Any Str | + +### apachedruid.ingest.kafka.partition_lag + +Partition-wise lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers. Minimum emission period for this metric is a minute. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| tags | The names of tags. | Any Str | +| partition | The partition of the topic. | Any Str | +| stream | The name of stream to ingest. | Any Str | +| data_source | The data source of ingestion task. | Any Str | + +### apachedruid.ingest.kinesis.avg_lag.time + +Average lag time in milliseconds between the current message sequence number consumed by the Kinesis indexing tasks and latest sequence number in Kinesis across all shards. Minimum emission period for this metric is a minute. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| tags | The names of tags. | Any Str | +| stream | The name of stream to ingest. | Any Str | +| data_source | The data source of ingestion task. | Any Str | + +### apachedruid.ingest.kinesis.lag.time + +Total lag time in milliseconds between the current message sequence number consumed by the Kinesis indexing tasks and latest sequence number in Kinesis across all shards. Minimum emission period for this metric is a minute. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| tags | The names of tags. | Any Str | +| stream | The name of stream to ingest. | Any Str | +| data_source | The data source of ingestion task. | Any Str | + +### apachedruid.ingest.kinesis.max_lag.time + +Max lag time in milliseconds between the current message sequence number consumed by the Kinesis indexing tasks and latest sequence number in Kinesis across all shards. Minimum emission period for this metric is a minute. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| tags | The names of tags. | Any Str | +| stream | The name of stream to ingest. | Any Str | +| data_source | The data source of ingestion task. | Any Str | + +### apachedruid.ingest.kinesis.partition_lag.time + +Partition-wise lag time in milliseconds between the current message sequence number consumed by the Kinesis indexing tasks and latest sequence number in Kinesis. Minimum emission period for this metric is a minute. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| tags | The names of tags. | Any Str | +| partition | The partition of the topic. | Any Str | +| stream | The name of stream to ingest. | Any Str | +| data_source | The data source of ingestion task. | Any Str | + +### apachedruid.ingest.merge.cpu + +CPU time in Nanoseconds spent on merging intermediate segments. 
+ +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| ns | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The type of ingestion task. | Any Str | +| data_source | The data source of ingestion task. | Any Str | +| group_id | The ingestion group id. | Any Str | +| tags | The names of tags. | Any Str | +| task_id | The id of the task. | Any Str | + +### apachedruid.ingest.merge.time + +Milliseconds spent merging intermediate segments. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| ms | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The type of ingestion task. | Any Str | +| data_source | The data source of ingestion task. | Any Str | +| group_id | The ingestion group id. | Any Str | +| tags | The names of tags. | Any Str | +| task_id | The id of the task. | Any Str | + +### apachedruid.ingest.notices.queue_size + +Number of pending notices to be processed by the coordinator. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {notices} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| tags | The names of tags. | Any Str | +| data_source | The data source of ingestion task. | Any Str | + +### apachedruid.ingest.notices.time + +Milliseconds taken to process a notice by the supervisor. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| tags | The names of tags. | Any Str | +| data_source | The data source of ingestion task. | Any Str | + +### apachedruid.ingest.pause.time + +Milliseconds spent by a task in a paused state without ingesting. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| tags | The names of tags. | Any Str | +| task_id | The id of the task. | Any Str | +| data_source | The data source of ingestion task. | Any Str | + +### apachedruid.ingest.persists.back_pressure + +Milliseconds spent creating persist tasks and blocking waiting for them to finish. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| ms | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The type of ingestion task. | Any Str | +| data_source | The data source of ingestion task. | Any Str | +| group_id | The ingestion group id. | Any Str | +| tags | The names of tags. | Any Str | +| task_id | The id of the task. | Any Str | + +### apachedruid.ingest.persists.count + +Number of times persist occurred. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| 1 | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The type of ingestion task. | Any Str | +| data_source | The data source of ingestion task. 
| Any Str | +| group_id | The ingestion group id. | Any Str | +| tags | The names of tags. | Any Str | +| task_id | The id of the task. | Any Str | + +### apachedruid.ingest.persists.cpu + +CPU time in nanoseconds spent on doing intermediate persist. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| ns | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The type of ingestion task. | Any Str | +| data_source | The data source of ingestion task. | Any Str | +| group_id | The ingestion group id. | Any Str | +| tags | The names of tags. | Any Str | +| task_id | The id of the task. | Any Str | + +### apachedruid.ingest.persists.failed + +Number of persists that failed. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {persists} | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The type of ingestion task. | Any Str | +| data_source | The data source of ingestion task. | Any Str | +| group_id | The ingestion group id. | Any Str | +| tags | The names of tags. | Any Str | +| task_id | The id of the task. | Any Str | + +### apachedruid.ingest.persists.time + +Milliseconds spent doing intermediate persist. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| ms | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The type of ingestion task. | Any Str | +| data_source | The data source of ingestion task. | Any Str | +| group_id | The ingestion group id. | Any Str | +| tags | The names of tags. | Any Str | +| task_id | The id of the task. | Any Str | + +### apachedruid.ingest.rows.output + +Number of Druid rows persisted. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {rows} | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The type of ingestion task. | Any Str | +| task_id | The id of the task. | Any Str | +| data_source | The data source of ingestion task. | Any Str | +| group_id | The ingestion group id. | Any Str | + +### apachedruid.ingest.segments.count + +Count of final segments created by job (includes tombstones). + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The type of ingestion task. | Any Str | +| data_source | The data source of ingestion task. | Any Str | +| group_id | The ingestion group id. | Any Str | +| tags | The names of tags. | Any Str | +| task_id | The id of the task. | Any Str | +| task_ingestion_mode | The mode of ingestion task. | Any Str | + +### apachedruid.ingest.shuffle.bytes + +Number of bytes shuffled per emission period. 
+ +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| supervisor_task_id | The task id of supervisor. | Any Str | + +### apachedruid.ingest.shuffle.requests + +Number of shuffle requests per emission period. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {requests} | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| supervisor_task_id | The task id of supervisor. | Any Str | + +### apachedruid.ingest.sink.count + +Number of sinks not handed off. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {sinks} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The type of ingestion task. | Any Str | +| data_source | The data source of ingestion task. | Any Str | +| group_id | The ingestion group id. | Any Str | +| tags | The names of tags. | Any Str | +| task_id | The id of the task. | Any Str | + +### apachedruid.ingest.tombstones.count + +Count of tombstones created by job. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The type of ingestion task. | Any Str | +| data_source | The data source of ingestion task. | Any Str | +| group_id | The ingestion group id. | Any Str | +| tags | The names of tags. | Any Str | +| task_id | The id of the task. | Any Str | +| task_ingestion_mode | The mode of ingestion task. | Any Str | + +### apachedruid.interval.compacted.count + +Total number of intervals of this datasource that are already compacted with the spec set in the auto compaction config. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {intervals} | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| data_source | The interval of data source. | Any Str | + +### apachedruid.interval.skip_compact.count + +Total number of intervals of this datasource that are skipped (not eligible for auto compaction) by the auto compaction. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {intervals} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| data_source | The interval of data source. | Any Str | + +### apachedruid.interval.wait_compact.count + +Total number of intervals of this datasource waiting to be compacted by the auto compaction (only consider intervals/segments that are eligible for auto compaction). + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {intervals} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| data_source | The interval of data source. | Any Str | + +### apachedruid.jetty.num_open_connections + +Number of open jetty connections. 
+ +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {connections} | Gauge | Int | + +### apachedruid.jetty.thread_pool.busy + +Number of busy threads that has work to do from the worker queue. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {threads} | Gauge | Int | + +### apachedruid.jetty.thread_pool.idle + +Number of idle threads. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {threads} | Gauge | Int | + +### apachedruid.jetty.thread_pool.is_low_on_threads + +A rough indicator of whether number of total workable threads allocated is enough to handle the works in the work queue. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {threads} | Gauge | Int | + +### apachedruid.jetty.thread_pool.max + +Number of maximum threads allocatable. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {threads} | Gauge | Int | + +### apachedruid.jetty.thread_pool.min + +Number of minimum threads allocatable. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {threads} | Gauge | Int | + +### apachedruid.jetty.thread_pool.queue_size + +Size of the worker queue. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### apachedruid.jetty.thread_pool.total + +Number of total workable threads allocated. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {threads} | Gauge | Int | + +### apachedruid.jvm.bufferpool.capacity + +Bufferpool capacity. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| bufferpool_name | The name of buffer pool. | Any Str | + +### apachedruid.jvm.bufferpool.count + +Bufferpool count. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| bufferpool_name | The name of buffer pool. | Any Str | + +### apachedruid.jvm.bufferpool.used + +Bufferpool used. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| bufferpool_name | The name of buffer pool. | Any Str | + +### apachedruid.jvm.gc.count + +Garbage collection count. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| 1 | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| gc_gen | The name of GC generation. | Any Str | +| gc_name | The gc name of jvm. | Any Str | + +### apachedruid.jvm.gc.cpu + +Count of CPU time in Nanoseconds spent on garbage collection. Note, `jvm/gc/cpu` represents the total time over multiple GC cycles; divide by `jvm/gc/count` to get the mean GC time per cycle. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| ns | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| gc_gen | The name of GC generation. | Any Str | +| gc_name | The gc name of jvm. | Any Str | + +### apachedruid.jvm.mem.committed + +Committed memory. 
+ +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| mem_kind | The memory kind of jvm. | Any Str | + +### apachedruid.jvm.mem.init + +Initial memory. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| mem_kind | The memory kind of jvm. | Any Str | + +### apachedruid.jvm.mem.max + +Max memory. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| mem_kind | The memory kind of jvm. | Any Str | + +### apachedruid.jvm.mem.used + +Used memory. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| mem_kind | The memory kind of jvm. | Any Str | + +### apachedruid.jvm.pool.committed + +Committed pool. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| pool_name | The name of the pool. | Any Str | +| pool_kind | The pool kind of jvm. | Any Str | + +### apachedruid.jvm.pool.init + +Initial pool. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| pool_name | The name of the pool. | Any Str | +| pool_kind | The pool kind of jvm. | Any Str | + +### apachedruid.jvm.pool.max + +Max pool. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| pool_name | The name of the pool. | Any Str | +| pool_kind | The pool kind of jvm. | Any Str | + +### apachedruid.jvm.pool.used + +Pool used. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| pool_name | The name of the pool. | Any Str | +| pool_kind | The pool kind of jvm. | Any Str | + +### apachedruid.kill.pending_segments.count + +Number of stale pending segments deleted from the metadata store. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {segments} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| data_source | The data source name of the kill task. | Any Str | + +### apachedruid.kill.task.count + +Number of tasks issued in the auto kill run. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {tasks} | Sum | Int | Delta | true | + +### apachedruid.kill_task.available_slot.count + +Number of available task slots that can be used for auto kill tasks in the auto kill run. This is the max number of task slots minus any currently running auto kill tasks. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {slots} | Gauge | Int | + +### apachedruid.kill_task.max_slot.count + +Maximum number of task slots available for auto kill tasks in the auto kill run. 
+ +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {slots} | Gauge | Int | + +### apachedruid.merge_buffer.pending_requests + +Number of requests waiting to acquire a batch of buffers from the merge buffer pool. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {requests} | Gauge | Int | + +### apachedruid.metadata.kill.audit.count + +Total number of audit logs that were automatically deleted from metadata store per each Coordinator kill audit duty run. This metric can help adjust `druid.coordinator.kill.audit.durationToRetain` configuration based on whether more or less audit logs need to be deleted per cycle. This metric is emitted only when `druid.coordinator.kill.audit.on` is set to true. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| 1 | Sum | Int | Delta | true | + +### apachedruid.metadata.kill.compaction.count + +Total number of compaction configurations that were automatically deleted from metadata store per each Coordinator kill compaction configuration duty run. This metric is only emitted when `druid.coordinator.kill.compaction.on` is set to true. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### apachedruid.metadata.kill.datasource.count + +Total number of datasource metadata that were automatically deleted from metadata store per each Coordinator kill datasource duty run. Note that datasource metadata only exists for datasource created from supervisor. This metric can help adjust `druid.coordinator.kill.datasource.durationToRetain` configuration based on whether more or less datasource metadata need to be deleted per cycle. This metric is only emitted when `druid.coordinator.kill.datasource.on` is set to true. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### apachedruid.metadata.kill.rule.count + +Total number of rules that were automatically deleted from metadata store per each Coordinator kill rule duty run. This metric can help adjust `druid.coordinator.kill.rule.durationToRetain` configuration based on whether more or less rules need to be deleted per cycle. This metric is only emitted when `druid.coordinator.kill.rule.on` is set to true. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {rules} | Gauge | Int | + +### apachedruid.metadata.kill.supervisor.count + +Total number of terminated supervisors that were automatically deleted from metadata store per each Coordinator kill supervisor duty run. This metric can help adjust `druid.coordinator.kill.supervisor.durationToRetain` configuration based on whether more or less terminated supervisors need to be deleted per cycle. This metric is only emitted when `druid.coordinator.kill.supervisor.on` is set to true. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {supervisors} | Gauge | Int | + +### apachedruid.metadatacache.init.time + +Time taken to initialize the broker segment metadata cache. Useful to detect if brokers are taking too long to start. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +### apachedruid.metadatacache.refresh.count + +Number of segments to refresh in broker segment metadata cache. 
+ +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### apachedruid.metadatacache.refresh.time + +Time taken to refresh segments in broker segment metadata cache. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +### apachedruid.query.byte_limit.exceeded.count + +Number of queries whose inlined subquery results exceeded the given byte limit. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {queries} | Sum | Int | Delta | true | + +### apachedruid.query.bytes + +The total number of bytes returned to the requesting client in the query response from the broker. Other services report the total bytes for their portion of the query. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| data_source | The data source name of the query. | Any Str | +| num_metrics | The number of metrics of the query. | Any Str | +| dimension | The dimension of the query. | Any Str | +| has_filters | Whether query has filters. | Any Str | +| threshold | The threshold of query. | Any Int | +| num_complex_metrics | The number of complex metrics. | Any Int | +| type | The type of query. | Any Str | +| remote_address | The remote address of the query. | Any Str | +| id | The id of query. | Any Str | +| context | The context of the query. | Any Str | +| num_dimensions | The number of dimensions of query. | Any Str | +| interval | The interval of the query. | Any Str | +| duration | The duration of query. | Any Str | + +### apachedruid.query.cache.delta.average_bytes + +Average cache entry byte size. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Delta | true | + +### apachedruid.query.cache.delta.errors + +Number of cache errors. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {errors} | Sum | Int | Delta | true | + +### apachedruid.query.cache.delta.evictions + +Number of cache evictions. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {evictions} | Sum | Int | Delta | true | + +### apachedruid.query.cache.delta.hit_rate + +Cache hit rate. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| 1 | Sum | Double | Delta | true | + +### apachedruid.query.cache.delta.hits + +Number of cache hits. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {hits} | Sum | Int | Delta | true | + +### apachedruid.query.cache.delta.misses + +Number of cache misses. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {misses} | Sum | Int | Delta | true | + +### apachedruid.query.cache.delta.num_entries + +Number of cache entries. 
+ +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {entries} | Sum | Int | Delta | true | + +### apachedruid.query.cache.delta.put.error + +Number of new cache entries that could not be cached due to errors. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {errors} | Sum | Int | Delta | true | + +### apachedruid.query.cache.delta.put.ok + +Number of new cache entries successfully cached. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| 1 | Sum | Int | Delta | true | + +### apachedruid.query.cache.delta.put.oversized + +Number of potential new cache entries that were skipped due to being too large (based on `druid.{broker,historical,realtime}.cache.maxEntrySize` properties). + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| 1 | Sum | Int | Delta | true | + +### apachedruid.query.cache.delta.size_bytes + +Size in bytes of cache entries. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Delta | true | + +### apachedruid.query.cache.delta.timeouts + +Number of cache timeouts. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {timeouts} | Sum | Int | Delta | true | + +### apachedruid.query.cache.memcached.delta + +Cache metrics unique to memcached (only if `druid.cache.type=memcached`) as their delta from the prior event emission. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| 1 | Sum | Int | Delta | true | + +### apachedruid.query.cache.memcached.total + +Cache metrics unique to memcached (only if `druid.cache.type=memcached`) as their actual values. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### apachedruid.query.cache.total.average_bytes + +Average cache entry byte size. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +### apachedruid.query.cache.total.errors + +Number of cache errors. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {errors} | Gauge | Int | + +### apachedruid.query.cache.total.evictions + +Number of cache evictions. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {evictions} | Gauge | Int | + +### apachedruid.query.cache.total.hit_rate + +Cache hit rate. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Double | + +### apachedruid.query.cache.total.hits + +Number of cache hits. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {hits} | Gauge | Int | + +### apachedruid.query.cache.total.misses + +Number of cache misses. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {misses} | Gauge | Int | + +### apachedruid.query.cache.total.num_entries + +Number of cache entries. 
+ +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {entries} | Gauge | Int | + +### apachedruid.query.cache.total.put.error + +Number of new cache entries that could not be cached due to errors. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {errors} | Gauge | Int | + +### apachedruid.query.cache.total.put.ok + +Number of new cache entries successfully cached. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### apachedruid.query.cache.total.put.oversized + +Number of potential new cache entries that were skipped due to being too large (based on `druid.{broker,historical,realtime}.cache.maxEntrySize` properties). + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### apachedruid.query.cache.total.size_bytes + +Size in bytes of cache entries. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +### apachedruid.query.cache.total.timeouts + +Number of cache timeouts. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {timeouts} | Gauge | Int | + +### apachedruid.query.count + +Number of total queries. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {queries} | Sum | Int | Delta | true | + +### apachedruid.query.cpu.time + +Microseconds of CPU time taken to complete a query. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| data_source | The data source name of the query. | Any Str | +| num_metrics | The number of metrics of the query. | Any Str | +| dimension | The dimension of the query. | Any Str | +| has_filters | Whether query has filters. | Any Str | +| threshold | The threshold of query. | Any Int | +| num_complex_metrics | The number of complex metrics. | Any Int | +| type | The type of query. | Any Str | +| remote_address | The remote address of the query. | Any Str | +| id | The id of query. | Any Str | +| context | The context of the query. | Any Str | +| num_dimensions | The number of dimensions of query. | Any Str | +| interval | The interval of the query. | Any Str | +| duration | The duration of query. | Any Str | + +### apachedruid.query.failed.count + +Number of failed queries. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {queries} | Sum | Int | Delta | true | + +### apachedruid.query.interrupted.count + +Number of queries interrupted due to cancellation. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {queries} | Sum | Int | Delta | true | + +### apachedruid.query.node.backpressure + +Milliseconds that the channel to this process has spent suspended due to backpressure. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| status | The status of the query. | Any Str | +| server | The server of the query. | Any Str | +| id | The id of query. 
| Any Str | + +### apachedruid.query.node.bytes + +Number of bytes returned from querying individual historical/realtime processes. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| status | The status of the query. | Any Str | +| server | The server of the query. | Any Str | +| id | The id of query. | Any Str | + +### apachedruid.query.node.time + +Milliseconds taken to query individual historical/realtime processes. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| status | The status of the query. | Any Str | +| server | The server of the query. | Any Str | +| id | The id of query. | Any Str | + +### apachedruid.query.node.ttfb + +Time to first byte. Milliseconds elapsed until Broker starts receiving the response from individual historical/realtime processes. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| status | The status of the query. | Any Str | +| server | The server of the query. | Any Str | +| id | The id of query. | Any Str | + +### apachedruid.query.priority + +Assigned lane and priority, only if Laning strategy is enabled. Refer to [Laning strategies](https://druid.apache.org/docs/latest/configuration#laning-strategies). + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| type | The type of query. | Any Str | +| data_source | The data source name of the query. | Any Str | +| lane | The name of query lane. | Any Str | + +### apachedruid.query.row_limit.exceeded.count + +Number of queries whose inlined subquery results exceeded the given row limit. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {queries} | Sum | Int | Delta | true | + +### apachedruid.query.segment.time + +Milliseconds taken to query individual segment. Includes time to page in the segment from disk. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| status | The status of the query. | Any Str | +| segment | The segment of the query. | Any Str | +| id | The id of query. | Any Str | +| vectorized | Whether query is vectorized. | Any Str | + +### apachedruid.query.segment_and_cache.time + +Milliseconds taken to query individual segment or hit the cache (if it is enabled on the Historical process). + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| segment | The segment of the query. | Any Str | +| id | The id of query. | Any Str | + +### apachedruid.query.segments.count + +This metric is not enabled by default. See the `QueryMetrics` Interface for reference regarding enabling this metric. Number of segments that will be touched by the query. In the broker, it makes a plan to distribute the query to realtime tasks and historicals based on a snapshot of segment distribution state. 
If there are some segments moved after this snapshot is created, certain historicals and realtime tasks can report those segments as missing to the broker. The broker will resend the query to the new servers that serve those segments after move. In this case, those segments can be counted more than once in this metric. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {segments} | Sum | Int | Delta | true | + +### apachedruid.query.success.count + +Number of queries successfully processed. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {queries} | Sum | Int | Delta | true | + +### apachedruid.query.time + +Milliseconds taken to complete a query. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| data_source | The data source name of the query. | Any Str | +| num_metrics | The number of metrics of the query. | Any Str | +| dimension | The dimension of the query. | Any Str | +| has_filters | Whether query has filters. | Any Str | +| threshold | The threshold of query. | Any Int | +| num_complex_metrics | The number of complex metrics. | Any Int | +| type | The type of query. | Any Str | +| remote_address | The remote address of the query. | Any Str | +| id | The id of query. | Any Str | +| context | The context of the query. | Any Str | +| num_dimensions | The number of dimensions of query. | Any Str | +| interval | The interval of the query. | Any Str | +| duration | The duration of query. | Any Str | + +### apachedruid.query.timeout.count + +Number of timed out queries. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {queries} | Sum | Int | Delta | true | + +### apachedruid.query.wait.time + +Milliseconds spent waiting for a segment to be scanned. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| segment | The segment of the query. | Any Str | +| id | The id of query. | Any Str | + +### apachedruid.segment.added.bytes + +Size in bytes of new segments created. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The task type of the segment. | Any Str | +| data_source | The data source of the segment. | Any Str | +| group_id | The group id of segment. | Any Str | +| tags | The tags of the segment. | Any Str | +| task_id | The task id of segment. | Any Str | +| interval | The interval of segment. | Any Str | + +### apachedruid.segment.assign_skipped.count + +Number of segments that could not be assigned to any server for loading. This can occur due to replication throttling, no available disk space, or a full load queue. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {segments} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| description | The description of segment. | Any Str | +| tier | The name of segment tier. | Any Str | +| data_source | The data source of the segment. 
| Any Str | + +### apachedruid.segment.assigned.count + +Number of segments assigned to be loaded in the cluster. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {segments} | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| tier | The name of segment tier. | Any Str | +| data_source | The data source of the segment. | Any Str | + +### apachedruid.segment.compacted.bytes + +Total bytes of this datasource that are already compacted with the spec set in the auto compaction config. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| data_source | The data source of the segment. | Any Str | + +### apachedruid.segment.compacted.count + +Total number of segments of this datasource that are already compacted with the spec set in the auto compaction config. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {segments} | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| data_source | The data source of the segment. | Any Str | + +### apachedruid.segment.count + +Number of used segments belonging to a data source. Emitted only for data sources to which at least one used segment belongs. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {segments} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| priority | The priority of segment. | Any Str | +| tier | The name of segment tier. | Any Str | +| data_source | The data source of the segment. | Any Str | + +### apachedruid.segment.deleted.count + +Number of segments marked as unused due to drop rules. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {segments} | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| data_source | The data source of the segment. | Any Str | + +### apachedruid.segment.drop_queue.count + +Number of segments to drop. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {segments} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| server | The server of the segment. | Any Str | + +### apachedruid.segment.drop_skipped.count + +Number of segments that could not be dropped from any server. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {segments} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| description | The description of segment. | Any Str | +| tier | The name of segment tier. | Any Str | +| data_source | The data source of the segment. | Any Str | + +### apachedruid.segment.dropped.count + +Number of segments chosen to be dropped from the cluster due to being over-replicated. 
+ +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {segments} | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| tier | The name of segment tier. | Any Str | +| data_source | The data source of the segment. | Any Str | + +### apachedruid.segment.load_queue.assigned + +Number of segments assigned for load or drop to the load queue of a server. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {segments} | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| server | The server of the segment. | Any Str | +| data_source | The data source of the segment. | Any Str | + +### apachedruid.segment.load_queue.cancelled + +Number of segment assignments that were canceled before completion. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| 1 | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| server | The server of the segment. | Any Str | +| data_source | The data source of the segment. | Any Str | + +### apachedruid.segment.load_queue.count + +Number of segments to load. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {segments} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| server | The server of the segment. | Any Str | + +### apachedruid.segment.load_queue.failed + +Number of segment assignments that failed to complete. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| 1 | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| server | The server of the segment. | Any Str | +| data_source | The data source of the segment. | Any Str | + +### apachedruid.segment.load_queue.size + +Size in bytes of segments to load. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| server | The server of the segment. | Any Str | + +### apachedruid.segment.load_queue.success + +Number of segment assignments that completed successfully. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| 1 | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| server | The server of the segment. | Any Str | +| data_source | The data source of the segment. | Any Str | + +### apachedruid.segment.max + +Maximum byte limit available for segments. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### apachedruid.segment.move_skipped.count + +Number of segments that were chosen for balancing but could not be moved. This can occur when segments are already optimally placed. 
+ +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {segments} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| description | The description of segment. | Any Str | +| tier | The name of segment tier. | Any Str | +| data_source | The data source of the segment. | Any Str | + +### apachedruid.segment.moved.bytes + +Size in bytes of segments moved/archived via the Move Task. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The task type of the segment. | Any Str | +| data_source | The data source of the segment. | Any Str | +| group_id | The group id of segment. | Any Str | +| tags | The tags of the segment. | Any Str | +| task_id | The task id of segment. | Any Str | +| interval | The interval of segment. | Any Str | + +### apachedruid.segment.moved.count + +Number of segments moved in the cluster. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {segments} | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| tier | The name of segment tier. | Any Str | +| data_source | The data source of the segment. | Any Str | + +### apachedruid.segment.nuked.bytes + +Size in bytes of segments deleted via the Kill Task. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The task type of the segment. | Any Str | +| data_source | The data source of the segment. | Any Str | +| group_id | The group id of segment. | Any Str | +| tags | The tags of the segment. | Any Str | +| task_id | The task id of segment. | Any Str | +| interval | The interval of segment. | Any Str | + +### apachedruid.segment.over_shadowed.count + +Number of segments marked as unused due to being overshadowed. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {segments} | Gauge | Int | + +### apachedruid.segment.pending_delete + +On-disk size in bytes of segments that are waiting to be cleared out. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +### apachedruid.segment.row_count.avg + +The average number of rows per segment on a historical. `SegmentStatsMonitor` must be enabled. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {rows} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| priority | The priority of segment. | Any Str | +| tier | The name of segment tier. | Any Str | +| data_source | The data source of the segment. | Any Str | + +### apachedruid.segment.row_count.range.count + +The number of segments in a bucket. `SegmentStatsMonitor` must be enabled. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {segments} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| priority | The priority of segment. | Any Str | +| tier | The name of segment tier. | Any Str | +| data_source | The data source of the segment. | Any Str | +| range | The range of segment. 
| Any Str | + +### apachedruid.segment.scan.active + +Number of segments currently scanned. This metric also indicates how many threads from `druid.processing.numThreads` are currently being used. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {segments} | Gauge | Int | + +### apachedruid.segment.scan.pending + +Number of segments in queue waiting to be scanned. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {segments} | Gauge | Int | + +### apachedruid.segment.size + +Total size of used segments in a data source. Emitted only for data sources to which at least one used segment belongs. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| data_source | The data source of the segment. | Any Str | + +### apachedruid.segment.skip_compact.bytes + +Total bytes of this datasource that are skipped (not eligible for auto compaction) by the auto compaction. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| data_source | The data source of the segment. | Any Str | + +### apachedruid.segment.skip_compact.count + +Total number of segments of this datasource that are skipped (not eligible for auto compaction) by the auto compaction. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {segments} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| data_source | The data source of the segment. | Any Str | + +### apachedruid.segment.unavailable.count + +Number of unique segments left to load until all used segments are available for queries. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {segments} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| data_source | The data source of the segment. | Any Str | + +### apachedruid.segment.under_replicated.count + +Number of segments, including replicas, left to load until all used segments are available for queries. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {segments} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| tier | The name of segment tier. | Any Str | +| data_source | The data source of the segment. | Any Str | + +### apachedruid.segment.unneeded.count + +Number of segments dropped due to being marked as unused. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {segments} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| tier | The name of segment tier. | Any Str | +| data_source | The data source of the segment. | Any Str | + +### apachedruid.segment.used + +Bytes used for served segments. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| priority | The priority of segment. | Any Str | +| tier | The name of segment tier. | Any Str | +| data_source | The data source of the segment. | Any Str | + +### apachedruid.segment.used_percent + +Percentage of space used by served segments. 
+ +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Double | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| priority | The priority of segment. | Any Str | +| tier | The name of segment tier. | Any Str | +| data_source | The data source of the segment. | Any Str | + +### apachedruid.segment.wait_compact.bytes + +Total bytes of this datasource waiting to be compacted by the auto compaction (only consider intervals/segments that are eligible for auto compaction). + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| data_source | The data source of the segment. | Any Str | + +### apachedruid.segment.wait_compact.count + +Total number of segments of this datasource waiting to be compacted by the auto compaction (only consider intervals/segments that are eligible for auto compaction). + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {segments} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| data_source | The data source of the segment. | Any Str | + +### apachedruid.serverview.init.time + +Time taken to initialize the broker server view. Useful to detect if brokers are taking too long to start. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +### apachedruid.serverview.sync.healthy + +Sync status of the Broker with a segment-loading server such as a Historical or Peon. Emitted only when [HTTP-based server view](https://druid.apache.org/docs/latest/configuration#segment-management) is enabled. This metric can be used in conjunction with `serverview/sync/unstableTime` to debug slow startup of Brokers. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| tier | The name of the tier. | Any Str | +| server | The address of server. | Any Str | + +### apachedruid.serverview.sync.unstable_time + +Time in milliseconds for which the Broker has been failing to sync with a segment-loading server. Emitted only when [HTTP-based server view](https://druid.apache.org/docs/latest/configuration#segment-management) is enabled. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| tier | The name of the tier. | Any Str | +| server | The address of server. | Any Str | + +### apachedruid.sql_query.bytes + +Number of bytes returned in the SQL query response. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| data_source | The data source name of the query. | Any Str | +| native_query_ids | The native query ids of sql query. | Any Str | +| engine | The engine name of the sql query. | Any Str | +| remote_address | The remote address of sql query. | Any Str | +| id | The id of sql query. | Any Str | +| success | Whether sql query is successful. | Any Str | + +### apachedruid.sql_query.planning_time_ms + +Milliseconds taken to plan a SQL to native query. 
+ +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| data_source | The data source name of the query. | Any Str | +| native_query_ids | The native query ids of sql query. | Any Str | +| engine | The engine name of the sql query. | Any Str | +| remote_address | The remote address of sql query. | Any Str | +| id | The id of sql query. | Any Str | +| success | Whether sql query is successful. | Any Str | + +### apachedruid.sql_query.time + +Milliseconds taken to complete a SQL query. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| data_source | The data source name of the query. | Any Str | +| native_query_ids | The native query ids of sql query. | Any Str | +| engine | The engine name of the sql query. | Any Str | +| remote_address | The remote address of sql query. | Any Str | +| id | The id of sql query. | Any Str | +| success | Whether sql query is successful. | Any Str | + +### apachedruid.subquery.byte_limit.count + +Number of subqueries whose results are materialized as frames (Druid's internal byte representation of rows). + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {subqueries} | Sum | Int | Delta | true | + +### apachedruid.subquery.fallback.count + +Number of subqueries which cannot be materialized as frames. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {subqueries} | Sum | Int | Delta | true | + +### apachedruid.subquery.fallback.insufficient_type.count + +Number of subqueries which cannot be materialized as frames due to insufficient type information in the row signature. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {subqueries} | Sum | Int | Delta | true | + +### apachedruid.subquery.fallback.unknown_reason.count + +Number of subqueries which cannot be materialized as frames due to other reasons. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {subqueries} | Sum | Int | Delta | true | + +### apachedruid.subquery.row_limit.count + +Number of subqueries whose results are materialized as rows (Java objects on heap). + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {subqueries} | Sum | Int | Delta | true | + +### apachedruid.sys.cpu + +CPU used. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| cpu_time | The group name of cpu time usage. | Any Str | +| cpu_name | The group name of cpu usage. | Any Str | + +### apachedruid.sys.disk.queue + +Disk queue length. Measures number of requests waiting to be processed by disk. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| disk_name | The name of disk. 
| Any Str | + +### apachedruid.sys.disk.read.count + +Reads from disk. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| disk_name | The name of disk. | Any Str | + +### apachedruid.sys.disk.read.size + +Bytes read from disk. One indicator of the amount of paging occurring for segments. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| disk_name | The name of disk. | Any Str | + +### apachedruid.sys.disk.transfer_time + +Transfer time to read from or write to disk. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| disk_name | The name of disk. | Any Str | + +### apachedruid.sys.disk.write.count + +Writes to disk. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| disk_name | The name of disk. | Any Str | + +### apachedruid.sys.disk.write.size + +Bytes written to disk. One indicator of the amount of paging occurring for segments. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| disk_name | The name of disk. | Any Str | + +### apachedruid.sys.fs.files.count + +Filesystem total IO nodes. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| fs_dir_name | The dir name. | Any Str | +| fs_dev_name | The dev name. | Any Str | + +### apachedruid.sys.fs.files.free + +Filesystem free IO nodes. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| fs_dir_name | The dir name. | Any Str | +| fs_dev_name | The dev name. | Any Str | + +### apachedruid.sys.fs.max + +Filesystem bytes max. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| fs_dir_name | The dir name. | Any Str | +| fs_dev_name | The dev name. | Any Str | + +### apachedruid.sys.fs.used + +Filesystem bytes used. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| fs_dir_name | The dir name. | Any Str | +| fs_dev_name | The dev name. | Any Str | + +### apachedruid.sys.la.1 + +System CPU load averages over past `i` minutes, where `i={1,5,15}`. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### apachedruid.sys.la.15 + +System CPU load averages over past `i` minutes, where `i={1,5,15}`. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### apachedruid.sys.la.5 + +System CPU load averages over past `i` minutes, where `i={1,5,15}`. 
+ +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### apachedruid.sys.mem.free + +Memory free. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +### apachedruid.sys.mem.max + +Memory max. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +### apachedruid.sys.mem.used + +Memory used. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +### apachedruid.sys.net.read.dropped + +Total packets dropped coming from network. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| net_hwaddr | The net hardware address. | Any Str | +| net_name | The name of network. | Any Str | +| net_address | The net address. | Any Str | + +### apachedruid.sys.net.read.errors + +Total network read errors. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| net_hwaddr | The net hardware address. | Any Str | +| net_name | The name of network. | Any Str | +| net_address | The net address. | Any Str | + +### apachedruid.sys.net.read.packets + +Total packets read from the network. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| net_hwaddr | The net hardware address. | Any Str | +| net_name | The name of network. | Any Str | +| net_address | The net address. | Any Str | + +### apachedruid.sys.net.read.size + +Bytes read from the network. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| net_hwaddr | The net hardware address. | Any Str | +| net_name | The name of network. | Any Str | +| net_address | The net address. | Any Str | + +### apachedruid.sys.net.write.collisions + +Total network write collisions. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| net_hwaddr | The net hardware address. | Any Str | +| net_name | The name of network. | Any Str | +| net_address | The net address. | Any Str | + +### apachedruid.sys.net.write.errors + +Total network write errors. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| net_hwaddr | The net hardware address. | Any Str | +| net_name | The name of network. | Any Str | +| net_address | The net address. | Any Str | + +### apachedruid.sys.net.write.packets + +Total packets written to the network. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| net_hwaddr | The net hardware address. | Any Str | +| net_name | The name of network. | Any Str | +| net_address | The net address. | Any Str | + +### apachedruid.sys.net.write.size + +Bytes written to the network. 
+ +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| net_hwaddr | The net hardware address. | Any Str | +| net_name | The name of network. | Any Str | +| net_address | The net address. | Any Str | + +### apachedruid.sys.storage.used + +Disk space used. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| fs_dir_name | The dir name. | Any Str | + +### apachedruid.sys.swap.free + +Free swap. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +### apachedruid.sys.swap.max + +Max swap. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +### apachedruid.sys.swap.page_in + +Paged in swap. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### apachedruid.sys.swap.page_out + +Paged out swap. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### apachedruid.sys.tcpv4.active_opens + +Total TCP active open connections. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### apachedruid.sys.tcpv4.attempt_fails + +Total TCP active connection failures. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### apachedruid.sys.tcpv4.estab_resets + +Total TCP connection resets. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### apachedruid.sys.tcpv4.in.errs + +Errors while reading segments. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### apachedruid.sys.tcpv4.in.segs + +Total segments received in connection. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### apachedruid.sys.tcpv4.out.rsts + +Total `out reset` packets sent to reset the connection. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### apachedruid.sys.tcpv4.out.segs + +Total segments sent. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### apachedruid.sys.tcpv4.passive_opens + +Total TCP passive open connections. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### apachedruid.sys.tcpv4.retrans.segs + +Total segments re-transmitted. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### apachedruid.sys.uptime + +Total system uptime. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| s | Gauge | Int | + +### apachedruid.task.action.batch.attempts + +Number of execution attempts for a single batch of task actions. Currently only being emitted for [batched `segmentAllocate` actions](https://druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions). + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {attempts} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| interval | The interval of task. | Any Str | +| data_source | The data source of the task. | Any Str | +| task_action_type | The action type of task. 
| Any Str | + +### apachedruid.task.action.batch.queue_time + +Milliseconds spent by a batch of task actions in queue. Currently only being emitted for [batched `segmentAllocate` actions](https://druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions). + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| interval | The interval of task. | Any Str | +| data_source | The data source of the task. | Any Str | +| task_action_type | The action type of task. | Any Str | + +### apachedruid.task.action.batch.run_time + +Milliseconds taken to execute a batch of task actions. Currently only being emitted for [batched `segmentAllocate` actions](https://druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions). + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| interval | The interval of task. | Any Str | +| data_source | The data source of the task. | Any Str | +| task_action_type | The action type of task. | Any Str | + +### apachedruid.task.action.batch.size + +Number of task actions in a batch that was executed during the emission period. Currently only being emitted for [batched `segmentAllocate` actions](https://druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions). + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {actions} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| interval | The interval of task. | Any Str | +| data_source | The data source of the task. | Any Str | +| task_action_type | The action type of task. | Any Str | + +### apachedruid.task.action.failed.count + +Number of task actions that failed during the emission period. Currently only being emitted for [batched `segmentAllocate` actions](https://druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions). + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {actions} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The type of task. | Any Str | +| data_source | The data source of the task. | Any Str | +| task_action_type | The action type of task. | Any Str | +| group_id | The group id of the task. | Any Str | +| tags | The tags of task. | Any Str | +| task_id | The id of task. | Any Str | + +### apachedruid.task.action.log.time + +Milliseconds taken to log a task action to the audit log. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The type of task. | Any Str | +| data_source | The data source of the task. | Any Str | +| task_action_type | The action type of task. | Any Str | +| group_id | The group id of the task. | Any Str | +| tags | The tags of task. | Any Str | +| task_id | The id of task. | Any Str | + +### apachedruid.task.action.run.time + +Milliseconds taken to execute a task action. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The type of task. 
| Any Str | +| data_source | The data source of the task. | Any Str | +| task_action_type | The action type of task. | Any Str | +| group_id | The group id of the task. | Any Str | +| tags | The tags of task. | Any Str | +| task_id | The id of task. | Any Str | + +### apachedruid.task.action.success.count + +Number of task actions that were executed successfully during the emission period. Currently only being emitted for [batched `segmentAllocate` actions](https://druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions). + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {actions} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The type of task. | Any Str | +| data_source | The data source of the task. | Any Str | +| task_action_type | The action type of task. | Any Str | +| group_id | The group id of the task. | Any Str | +| tags | The tags of task. | Any Str | +| task_id | The id of task. | Any Str | + +### apachedruid.task.failed.count + +Number of failed tasks per emission period. This metric is only available if the `TaskCountStatsMonitor` module is included. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {tasks} | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| data_source | The data source of the task. | Any Str | + +### apachedruid.task.pending.count + +Number of current pending tasks. This metric is only available if the `TaskCountStatsMonitor` module is included. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {tasks} | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| data_source | The data source of the task. | Any Str | + +### apachedruid.task.pending.time + +Milliseconds taken for a task to wait for running. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The type of task. | Any Str | +| data_source | The data source of the task. | Any Str | +| group_id | The group id of the task. | Any Str | +| tags | The tags of task. | Any Str | +| task_id | The id of task. | Any Str | + +### apachedruid.task.run.time + +Milliseconds taken to run a task. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The type of task. | Any Str | +| data_source | The data source of the task. | Any Str | +| group_id | The group id of the task. | Any Str | +| task_status | The status of the task. | Any Str | +| tags | The tags of task. | Any Str | +| task_id | The id of task. | Any Str | + +### apachedruid.task.running.count + +Number of current running tasks. This metric is only available if the `TaskCountStatsMonitor` module is included. 
+ +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {tasks} | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| data_source | The data source of the task. | Any Str | + +### apachedruid.task.segment_availability.wait.time + +The amount of milliseconds a batch indexing task waited for newly created segments to become available for querying. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| task_type | The type of task. | Any Str | +| data_source | The data source of the task. | Any Str | +| group_id | The group id of the task. | Any Str | +| segment_availability_confirmed | Whether segment availability is confirmed. | Any Str | +| tags | The tags of task. | Any Str | +| task_id | The id of task. | Any Str | + +### apachedruid.task.success.count + +Number of successful tasks per emission period. This metric is only available if the `TaskCountStatsMonitor` module is included. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {tasks} | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| data_source | The data source of the task. | Any Str | + +### apachedruid.task.waiting.count + +Number of current waiting tasks. This metric is only available if the `TaskCountStatsMonitor` module is included. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {tasks} | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| data_source | The data source of the task. | Any Str | + +### apachedruid.task_slot.blacklisted.count + +Number of total task slots in blacklisted Middle Managers and Indexers per emission period. This metric is only available if the `TaskSlotCountStatsMonitor` module is included. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {slots} | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| category | The category of task slot. | Any Str | + +### apachedruid.task_slot.idle.count + +Number of idle task slots per emission period. This metric is only available if the `TaskSlotCountStatsMonitor` module is included. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {slots} | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| category | The category of task slot. | Any Str | + +### apachedruid.task_slot.lazy.count + +Number of total task slots in lazy marked Middle Managers and Indexers per emission period. This metric is only available if the `TaskSlotCountStatsMonitor` module is included. 
+ +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {slots} | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| category | The category of task slot. | Any Str | + +### apachedruid.task_slot.total.count + +Number of total task slots per emission period. This metric is only available if the `TaskSlotCountStatsMonitor` module is included. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {slots} | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| category | The category of task slot. | Any Str | + +### apachedruid.task_slot.used.count + +Number of busy task slots per emission period. This metric is only available if the `TaskSlotCountStatsMonitor` module is included. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {slots} | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| category | The category of task slot. | Any Str | + +### apachedruid.tier.historical.count + +Number of available historical nodes in each tier. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| tier | The name of tier. | Any Str | + +### apachedruid.tier.replication.factor + +Configured maximum replication factor in each tier. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| tier | The name of tier. | Any Str | + +### apachedruid.tier.required.capacity + +Total capacity in bytes required in each tier. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| tier | The name of tier. | Any Str | + +### apachedruid.tier.total.capacity + +Total capacity in bytes available in each tier. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| tier | The name of tier. | Any Str | + +### apachedruid.worker.task.failed.count + +Number of failed tasks run on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included, and is only supported for Middle Manager nodes. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {tasks} | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| category | The category of worker. | Any Str | +| worker_version | The version of worker. | Any Str | + +### apachedruid.worker.task.success.count + +Number of successful tasks run on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included, and is only supported for Middle Manager nodes. 
+ +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {tasks} | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| category | The category of worker. | Any Str | +| worker_version | The version of worker. | Any Str | + +### apachedruid.worker.task_slot.idle.count + +Number of idle task slots on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included, and is only supported for Middle Manager nodes. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {slots} | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| category | The category of worker. | Any Str | +| worker_version | The version of worker. | Any Str | + +### apachedruid.worker.task_slot.total.count + +Number of total task slots on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {slots} | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| category | The category of worker. | Any Str | +| worker_version | The version of worker. | Any Str | + +### apachedruid.worker.task_slot.used.count + +Number of busy task slots on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {slots} | Sum | Int | Delta | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| category | The category of worker. | Any Str | +| worker_version | The version of worker. | Any Str | + +### apachedruid.zk.connected + +Indicator of connection status. `1` for connected, `0` for disconnected. Emitted once per monitor period. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### apachedruid.zk.reconnect.time + +Amount of time, in milliseconds, that a server was disconnected from ZooKeeper before reconnecting. Emitted on reconnection. Not emitted if connection to ZooKeeper is permanently lost, because in this case, there is no reconnection. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +## Resource Attributes + +| Name | Description | Values | Enabled | +| ---- | ----------- | ------ | ------- | +| apachedruid.cluster.name | The name of the apachedruid cluster. | Any Str | true | +| apachedruid.node.host | The name of the apachedruid node. | Any Str | true | +| apachedruid.node.service | The service name of the apachedruid node. 
| Any Str | true | diff --git a/receiver/apachedruidreceiver/factory.go b/receiver/apachedruidreceiver/factory.go new file mode 100644 index 0000000000000..8b6e02a80c1e6 --- /dev/null +++ b/receiver/apachedruidreceiver/factory.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package apachedruidreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachedruidreceiver" + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/receiver" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachedruidreceiver/internal/metadata" +) + +func NewFactory() receiver.Factory { + return receiver.NewFactory( + metadata.Type, + createDefaultConfig, + receiver.WithMetrics(createMetricsReceiver, metadata.MetricsStability)) +} + +// createDefaultConfig creates the default configuration for receiver. +func createDefaultConfig() component.Config { + return &Config{ + HTTPServerSettings: confighttp.HTTPServerSettings{ + Endpoint: "0.0.0.0:9000", + }, + MetricsPath: "/services/collector/metrics", + ClusterName: "default", + } +} + +func createMetricsReceiver(_ context.Context, params receiver.CreateSettings, cfg component.Config, nextConsumer consumer.Metrics) (receiver.Metrics, error) { + return newMetricsReceiver(cfg.(*Config), params, nextConsumer) +} diff --git a/receiver/apachedruidreceiver/generated_component_test.go b/receiver/apachedruidreceiver/generated_component_test.go new file mode 100644 index 0000000000000..0d79e7456cb5e --- /dev/null +++ b/receiver/apachedruidreceiver/generated_component_test.go @@ -0,0 +1,68 @@ +// Code generated by mdatagen. DO NOT EDIT. 
+ +package apachedruidreceiver + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/confmap/confmaptest" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/receiver" + "go.opentelemetry.io/collector/receiver/receivertest" +) + +func TestComponentLifecycle(t *testing.T) { + factory := NewFactory() + + tests := []struct { + name string + createFn func(ctx context.Context, set receiver.CreateSettings, cfg component.Config) (component.Component, error) + }{ + + { + name: "logs", + createFn: func(ctx context.Context, set receiver.CreateSettings, cfg component.Config) (component.Component, error) { + return factory.CreateLogsReceiver(ctx, set, cfg, consumertest.NewNop()) + }, + }, + + { + name: "metrics", + createFn: func(ctx context.Context, set receiver.CreateSettings, cfg component.Config) (component.Component, error) { + return factory.CreateMetricsReceiver(ctx, set, cfg, consumertest.NewNop()) + }, + }, + } + + cm, err := confmaptest.LoadConf("metadata.yaml") + require.NoError(t, err) + cfg := factory.CreateDefaultConfig() + sub, err := cm.Sub("tests::config") + require.NoError(t, err) + require.NoError(t, component.UnmarshalConfig(sub, cfg)) + + for _, test := range tests { + t.Run(test.name+"-shutdown", func(t *testing.T) { + c, err := test.createFn(context.Background(), receivertest.NewNopCreateSettings(), cfg) + require.NoError(t, err) + err = c.Shutdown(context.Background()) + require.NoError(t, err) + }) + t.Run(test.name+"-lifecycle", func(t *testing.T) { + firstRcvr, err := test.createFn(context.Background(), receivertest.NewNopCreateSettings(), cfg) + require.NoError(t, err) + host := componenttest.NewNopHost() + require.NoError(t, err) + require.NoError(t, firstRcvr.Start(context.Background(), host)) + require.NoError(t, firstRcvr.Shutdown(context.Background())) + secondRcvr, err := test.createFn(context.Background(), receivertest.NewNopCreateSettings(), cfg) + require.NoError(t, err) + require.NoError(t, secondRcvr.Start(context.Background(), host)) + require.NoError(t, secondRcvr.Shutdown(context.Background())) + }) + } +} diff --git a/receiver/apachedruidreceiver/go.mod b/receiver/apachedruidreceiver/go.mod new file mode 100644 index 0000000000000..1921d588dcd0d --- /dev/null +++ b/receiver/apachedruidreceiver/go.mod @@ -0,0 +1,70 @@ +module github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachedruidreceiver + +go 1.21 + +require ( + github.com/google/go-cmp v0.6.0 + github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.90.0 + github.com/stretchr/testify v1.9.0 + go.opentelemetry.io/collector/component v0.90.0 + go.opentelemetry.io/collector/config/confighttp v0.90.0 + go.opentelemetry.io/collector/confmap v0.90.0 + go.opentelemetry.io/collector/consumer v0.90.0 + go.opentelemetry.io/collector/pdata v1.0.0 + go.opentelemetry.io/collector/receiver v0.90.0 + go.opentelemetry.io/otel/metric v1.21.0 + go.opentelemetry.io/otel/trace v1.21.0 + go.uber.org/zap v1.27.0 +) + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/snappy v0.0.4 // 
indirect + github.com/hashicorp/go-version v1.6.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.3 // indirect + github.com/knadh/koanf/maps v0.1.1 // indirect + github.com/knadh/koanf/providers/confmap v0.1.0 // indirect + github.com/knadh/koanf/v2 v2.0.1 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/rs/cors v1.10.1 // indirect + go.opentelemetry.io/collector v0.90.0 // indirect + go.opentelemetry.io/collector/config/configauth v0.90.0 // indirect + go.opentelemetry.io/collector/config/configcompression v0.90.0 // indirect + go.opentelemetry.io/collector/config/configopaque v0.90.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.90.0 // indirect + go.opentelemetry.io/collector/config/configtls v0.90.0 // indirect + go.opentelemetry.io/collector/config/internal v0.90.0 // indirect + go.opentelemetry.io/collector/extension v0.90.0 // indirect + go.opentelemetry.io/collector/extension/auth v0.90.0 // indirect + go.opentelemetry.io/collector/featuregate v1.3.1-0.20240306115632-b2693620eff6 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect + go.opentelemetry.io/otel v1.21.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/net v0.18.0 // indirect + golang.org/x/sys v0.14.0 // indirect + golang.org/x/text v0.14.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/grpc v1.59.0 // indirect + google.golang.org/protobuf v1.31.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) + +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/common => ../../internal/common + +retract ( + v0.76.2 + v0.76.1 + v0.65.0 +) diff --git a/receiver/apachedruidreceiver/go.sum b/receiver/apachedruidreceiver/go.sum new file mode 100644 index 0000000000000..d4741a4bffedf --- /dev/null +++ b/receiver/apachedruidreceiver/go.sum @@ -0,0 +1,154 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= 
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA= +github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= +github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= +github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU= +github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU= +github.com/knadh/koanf/v2 v2.0.1 h1:1dYGITt1I23x8cfx8ZnldtezdyaZtfAuRtIFOiRzK7g= +github.com/knadh/koanf/v2 v2.0.1/go.mod h1:ZeiIlIDXTE7w1lMT6UVcNiRAS2/rCeLn/GdLNvY1Dus= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 h1:BpfhmLKZf+SjVanKKhCgf3bg+511DmU9eDQTen7LLbY= +github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= 
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo= +github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opentelemetry.io/collector v0.90.0 h1:Wyiiu+78tV5zZDvza9hvZu6FgOkFqURNzPHkKcI+asw= +go.opentelemetry.io/collector v0.90.0/go.mod h1:qRhpGBXozKMn+7SiniobhcZ0AbCSWdYqL+XM3gnwejQ= +go.opentelemetry.io/collector/component v0.90.0 h1:rufHQfFpZQ4mc30GAsW6JSm1DvJWCGjoyw+dNXpgTV8= +go.opentelemetry.io/collector/component v0.90.0/go.mod h1:+WX5h5I98AwL256AdFvn8EpPZ02Q+UrKo9AdI8LLfuQ= +go.opentelemetry.io/collector/config/configauth v0.90.0 h1:lt/02ssxsoGXOsj3sGrn6NXIjOoFEXyK/t70lvr7EWo= +go.opentelemetry.io/collector/config/configauth v0.90.0/go.mod h1:tHCeUhnik4RrLuiHuyDMRy7YxjMnXb/PCm7jdkmyfyc= +go.opentelemetry.io/collector/config/configcompression v0.90.0 h1:5y5sGbvo0NZKJo6soxhxWHPbfwfc+XuzN6L44M6aDoo= +go.opentelemetry.io/collector/config/configcompression v0.90.0/go.mod h1:LaavoxZsro5lL7qh1g9DMifG0qixWPEecW18Qr8bpag= +go.opentelemetry.io/collector/config/confighttp v0.90.0 h1:trgTrKp3hzyCMO8RDtPTfrnia6h1qhr8QOqS5Sizl6M= +go.opentelemetry.io/collector/config/confighttp v0.90.0/go.mod h1:viutRIlajhHWuR3snu1RLako3b+Rd3MM0OfRDhIuicM= +go.opentelemetry.io/collector/config/configopaque v0.90.0 h1:tnuwVWaKbPIhgLawcU4xnex53tJbQsecNq86eZRz1rE= +go.opentelemetry.io/collector/config/configopaque v0.90.0/go.mod h1:TPCHaU+QXiEV+JXbgyr6mSErTI9chwQyasDVMdJr3eY= +go.opentelemetry.io/collector/config/configtelemetry v0.90.0 h1:1exyNLDVSSkdDLUoVTLiy5pfzB7ak802JhOaOTOe2Zo= +go.opentelemetry.io/collector/config/configtelemetry v0.90.0/go.mod h1:+LAXM5WFMW/UbTlAuSs6L/W72WC+q8TBJt/6z39FPOU= +go.opentelemetry.io/collector/config/configtls v0.90.0 h1:bsPZkh5ejlIk/XwLdzz91empM3STU8xr6yArqMVYxJ4= +go.opentelemetry.io/collector/config/configtls v0.90.0/go.mod h1:eLLgpNPxHAtAynKCJN7p9O7GIDEIRKfjsFJs3BQazyg= +go.opentelemetry.io/collector/config/internal v0.90.0 h1:CVRGxmXupYOcLGgYjWb5XmFI9oWmvRD4NwzoasjolUs= +go.opentelemetry.io/collector/config/internal v0.90.0/go.mod h1:42VsQ/1kP2qnvzjNi+dfNP+KyCFRADejyrJ8m2GVL3M= +go.opentelemetry.io/collector/confmap v0.90.0 h1:vU+759p/4zLeet8yeI8uVq4+xCm73/5K8t2Tx0MzX/8= +go.opentelemetry.io/collector/confmap v0.90.0/go.mod h1:uxV+fZ85kG31oovL6Cl3fAMQ3RRPwUvfAbbA9WT1Yhk= +go.opentelemetry.io/collector/consumer v0.90.0 h1:5cScUTbv9PIvI/bKTa2GbAn/LAMwcg2znAb0UKfhVy4= +go.opentelemetry.io/collector/consumer v0.90.0/go.mod h1:mh/eEA0UClEtgQMDICQVL7oSylgbskFfueBO0i5HkSQ= +go.opentelemetry.io/collector/extension v0.90.0 h1:NDvZneZEapDeOD195kDZiEW8IUb2SimmkI/CrKfy+WA= +go.opentelemetry.io/collector/extension v0.90.0/go.mod 
h1:vUiLcJQuM04CuyCf6AbjW8OCSeINSU4242GPVzTzX9w= +go.opentelemetry.io/collector/extension/auth v0.90.0 h1:L5UfHQ0jXMllC7nB4l9EAXeAEExlsvwJOr22sB+55Cs= +go.opentelemetry.io/collector/extension/auth v0.90.0/go.mod h1:x/U5M+J3Xjmcec94j3v79s8vjsLMaUrN5abjcal0sEw= +go.opentelemetry.io/collector/featuregate v1.3.1-0.20240306115632-b2693620eff6 h1:WPX5pMQgNPvjLrtQ+XoBBsbyhy1m1JtYc1B/rIFhCnQ= +go.opentelemetry.io/collector/featuregate v1.3.1-0.20240306115632-b2693620eff6/go.mod h1:w7nUODKxEi3FLf1HslCiE6YWtMtOOrMnSwsDam8Mg9w= +go.opentelemetry.io/collector/pdata v1.0.0 h1:ECP2jnLztewsHmL1opL8BeMtWVc7/oSlKNhfY9jP8ec= +go.opentelemetry.io/collector/pdata v1.0.0/go.mod h1:TsDFgs4JLNG7t6x9D8kGswXUz4mme+MyNChHx8zSF6k= +go.opentelemetry.io/collector/receiver v0.90.0 h1:cVp1s9c9kSfn5ZTXb9o8nlZnLEgs2gutEYzty5+eUEI= +go.opentelemetry.io/collector/receiver v0.90.0/go.mod h1:oRmH7WKmkJo7tgc7odoArLXjrz2TZdcw7pco0KRZjWo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= +go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= +go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= +go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= +go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= +go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= +go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg= +golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/receiver/apachedruidreceiver/internal/metadata/generated_config.go b/receiver/apachedruidreceiver/internal/metadata/generated_config.go new file mode 100644 index 0000000000000..34b1120429ebc --- /dev/null +++ b/receiver/apachedruidreceiver/internal/metadata/generated_config.go @@ -0,0 +1,1066 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import "go.opentelemetry.io/collector/confmap" + +// MetricConfig provides common config for a particular metric. 
+type MetricConfig struct { + Enabled bool `mapstructure:"enabled"` + + enabledSetByUser bool +} + +func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error { + if parser == nil { + return nil + } + err := parser.Unmarshal(ms) + if err != nil { + return err + } + ms.enabledSetByUser = parser.IsSet("enabled") + return nil +} + +// MetricsConfig provides config for apachedruid metrics. +type MetricsConfig struct { + ApachedruidCompactSegmentAnalyzerFetchAndProcessMillis MetricConfig `mapstructure:"apachedruid.compact.segment_analyzer.fetch_and_process_millis"` + ApachedruidCompactTaskCount MetricConfig `mapstructure:"apachedruid.compact.task.count"` + ApachedruidCompactTaskAvailableSlotCount MetricConfig `mapstructure:"apachedruid.compact_task.available_slot.count"` + ApachedruidCompactTaskMaxSlotCount MetricConfig `mapstructure:"apachedruid.compact_task.max_slot.count"` + ApachedruidCoordinatorGlobalTime MetricConfig `mapstructure:"apachedruid.coordinator.global.time"` + ApachedruidCoordinatorTime MetricConfig `mapstructure:"apachedruid.coordinator.time"` + ApachedruidIngestBytesReceived MetricConfig `mapstructure:"apachedruid.ingest.bytes.received"` + ApachedruidIngestCount MetricConfig `mapstructure:"apachedruid.ingest.count"` + ApachedruidIngestEventsBuffered MetricConfig `mapstructure:"apachedruid.ingest.events.buffered"` + ApachedruidIngestEventsDuplicate MetricConfig `mapstructure:"apachedruid.ingest.events.duplicate"` + ApachedruidIngestEventsMessageGap MetricConfig `mapstructure:"apachedruid.ingest.events.message_gap"` + ApachedruidIngestEventsProcessed MetricConfig `mapstructure:"apachedruid.ingest.events.processed"` + ApachedruidIngestEventsProcessedWithError MetricConfig `mapstructure:"apachedruid.ingest.events.processed_with_error"` + ApachedruidIngestEventsThrownAway MetricConfig `mapstructure:"apachedruid.ingest.events.thrown_away"` + ApachedruidIngestEventsUnparseable MetricConfig `mapstructure:"apachedruid.ingest.events.unparseable"` + ApachedruidIngestHandoffCount MetricConfig `mapstructure:"apachedruid.ingest.handoff.count"` + ApachedruidIngestHandoffFailed MetricConfig `mapstructure:"apachedruid.ingest.handoff.failed"` + ApachedruidIngestHandoffTime MetricConfig `mapstructure:"apachedruid.ingest.handoff.time"` + ApachedruidIngestInputBytes MetricConfig `mapstructure:"apachedruid.ingest.input.bytes"` + ApachedruidIngestKafkaAvgLag MetricConfig `mapstructure:"apachedruid.ingest.kafka.avg_lag"` + ApachedruidIngestKafkaLag MetricConfig `mapstructure:"apachedruid.ingest.kafka.lag"` + ApachedruidIngestKafkaMaxLag MetricConfig `mapstructure:"apachedruid.ingest.kafka.max_lag"` + ApachedruidIngestKafkaPartitionLag MetricConfig `mapstructure:"apachedruid.ingest.kafka.partition_lag"` + ApachedruidIngestKinesisAvgLagTime MetricConfig `mapstructure:"apachedruid.ingest.kinesis.avg_lag.time"` + ApachedruidIngestKinesisLagTime MetricConfig `mapstructure:"apachedruid.ingest.kinesis.lag.time"` + ApachedruidIngestKinesisMaxLagTime MetricConfig `mapstructure:"apachedruid.ingest.kinesis.max_lag.time"` + ApachedruidIngestKinesisPartitionLagTime MetricConfig `mapstructure:"apachedruid.ingest.kinesis.partition_lag.time"` + ApachedruidIngestMergeCPU MetricConfig `mapstructure:"apachedruid.ingest.merge.cpu"` + ApachedruidIngestMergeTime MetricConfig `mapstructure:"apachedruid.ingest.merge.time"` + ApachedruidIngestNoticesQueueSize MetricConfig `mapstructure:"apachedruid.ingest.notices.queue_size"` + ApachedruidIngestNoticesTime MetricConfig `mapstructure:"apachedruid.ingest.notices.time"` + 
ApachedruidIngestPauseTime MetricConfig `mapstructure:"apachedruid.ingest.pause.time"` + ApachedruidIngestPersistsBackPressure MetricConfig `mapstructure:"apachedruid.ingest.persists.back_pressure"` + ApachedruidIngestPersistsCount MetricConfig `mapstructure:"apachedruid.ingest.persists.count"` + ApachedruidIngestPersistsCPU MetricConfig `mapstructure:"apachedruid.ingest.persists.cpu"` + ApachedruidIngestPersistsFailed MetricConfig `mapstructure:"apachedruid.ingest.persists.failed"` + ApachedruidIngestPersistsTime MetricConfig `mapstructure:"apachedruid.ingest.persists.time"` + ApachedruidIngestRowsOutput MetricConfig `mapstructure:"apachedruid.ingest.rows.output"` + ApachedruidIngestSegmentsCount MetricConfig `mapstructure:"apachedruid.ingest.segments.count"` + ApachedruidIngestShuffleBytes MetricConfig `mapstructure:"apachedruid.ingest.shuffle.bytes"` + ApachedruidIngestShuffleRequests MetricConfig `mapstructure:"apachedruid.ingest.shuffle.requests"` + ApachedruidIngestSinkCount MetricConfig `mapstructure:"apachedruid.ingest.sink.count"` + ApachedruidIngestTombstonesCount MetricConfig `mapstructure:"apachedruid.ingest.tombstones.count"` + ApachedruidIntervalCompactedCount MetricConfig `mapstructure:"apachedruid.interval.compacted.count"` + ApachedruidIntervalSkipCompactCount MetricConfig `mapstructure:"apachedruid.interval.skip_compact.count"` + ApachedruidIntervalWaitCompactCount MetricConfig `mapstructure:"apachedruid.interval.wait_compact.count"` + ApachedruidJettyNumOpenConnections MetricConfig `mapstructure:"apachedruid.jetty.num_open_connections"` + ApachedruidJettyThreadPoolBusy MetricConfig `mapstructure:"apachedruid.jetty.thread_pool.busy"` + ApachedruidJettyThreadPoolIdle MetricConfig `mapstructure:"apachedruid.jetty.thread_pool.idle"` + ApachedruidJettyThreadPoolIsLowOnThreads MetricConfig `mapstructure:"apachedruid.jetty.thread_pool.is_low_on_threads"` + ApachedruidJettyThreadPoolMax MetricConfig `mapstructure:"apachedruid.jetty.thread_pool.max"` + ApachedruidJettyThreadPoolMin MetricConfig `mapstructure:"apachedruid.jetty.thread_pool.min"` + ApachedruidJettyThreadPoolQueueSize MetricConfig `mapstructure:"apachedruid.jetty.thread_pool.queue_size"` + ApachedruidJettyThreadPoolTotal MetricConfig `mapstructure:"apachedruid.jetty.thread_pool.total"` + ApachedruidJvmBufferpoolCapacity MetricConfig `mapstructure:"apachedruid.jvm.bufferpool.capacity"` + ApachedruidJvmBufferpoolCount MetricConfig `mapstructure:"apachedruid.jvm.bufferpool.count"` + ApachedruidJvmBufferpoolUsed MetricConfig `mapstructure:"apachedruid.jvm.bufferpool.used"` + ApachedruidJvmGcCount MetricConfig `mapstructure:"apachedruid.jvm.gc.count"` + ApachedruidJvmGcCPU MetricConfig `mapstructure:"apachedruid.jvm.gc.cpu"` + ApachedruidJvmMemCommitted MetricConfig `mapstructure:"apachedruid.jvm.mem.committed"` + ApachedruidJvmMemInit MetricConfig `mapstructure:"apachedruid.jvm.mem.init"` + ApachedruidJvmMemMax MetricConfig `mapstructure:"apachedruid.jvm.mem.max"` + ApachedruidJvmMemUsed MetricConfig `mapstructure:"apachedruid.jvm.mem.used"` + ApachedruidJvmPoolCommitted MetricConfig `mapstructure:"apachedruid.jvm.pool.committed"` + ApachedruidJvmPoolInit MetricConfig `mapstructure:"apachedruid.jvm.pool.init"` + ApachedruidJvmPoolMax MetricConfig `mapstructure:"apachedruid.jvm.pool.max"` + ApachedruidJvmPoolUsed MetricConfig `mapstructure:"apachedruid.jvm.pool.used"` + ApachedruidKillPendingSegmentsCount MetricConfig `mapstructure:"apachedruid.kill.pending_segments.count"` + ApachedruidKillTaskCount MetricConfig 
`mapstructure:"apachedruid.kill.task.count"` + ApachedruidKillTaskAvailableSlotCount MetricConfig `mapstructure:"apachedruid.kill_task.available_slot.count"` + ApachedruidKillTaskMaxSlotCount MetricConfig `mapstructure:"apachedruid.kill_task.max_slot.count"` + ApachedruidMergeBufferPendingRequests MetricConfig `mapstructure:"apachedruid.merge_buffer.pending_requests"` + ApachedruidMetadataKillAuditCount MetricConfig `mapstructure:"apachedruid.metadata.kill.audit.count"` + ApachedruidMetadataKillCompactionCount MetricConfig `mapstructure:"apachedruid.metadata.kill.compaction.count"` + ApachedruidMetadataKillDatasourceCount MetricConfig `mapstructure:"apachedruid.metadata.kill.datasource.count"` + ApachedruidMetadataKillRuleCount MetricConfig `mapstructure:"apachedruid.metadata.kill.rule.count"` + ApachedruidMetadataKillSupervisorCount MetricConfig `mapstructure:"apachedruid.metadata.kill.supervisor.count"` + ApachedruidMetadatacacheInitTime MetricConfig `mapstructure:"apachedruid.metadatacache.init.time"` + ApachedruidMetadatacacheRefreshCount MetricConfig `mapstructure:"apachedruid.metadatacache.refresh.count"` + ApachedruidMetadatacacheRefreshTime MetricConfig `mapstructure:"apachedruid.metadatacache.refresh.time"` + ApachedruidQueryByteLimitExceededCount MetricConfig `mapstructure:"apachedruid.query.byte_limit.exceeded.count"` + ApachedruidQueryBytes MetricConfig `mapstructure:"apachedruid.query.bytes"` + ApachedruidQueryCacheDeltaAverageBytes MetricConfig `mapstructure:"apachedruid.query.cache.delta.average_bytes"` + ApachedruidQueryCacheDeltaErrors MetricConfig `mapstructure:"apachedruid.query.cache.delta.errors"` + ApachedruidQueryCacheDeltaEvictions MetricConfig `mapstructure:"apachedruid.query.cache.delta.evictions"` + ApachedruidQueryCacheDeltaHitRate MetricConfig `mapstructure:"apachedruid.query.cache.delta.hit_rate"` + ApachedruidQueryCacheDeltaHits MetricConfig `mapstructure:"apachedruid.query.cache.delta.hits"` + ApachedruidQueryCacheDeltaMisses MetricConfig `mapstructure:"apachedruid.query.cache.delta.misses"` + ApachedruidQueryCacheDeltaNumEntries MetricConfig `mapstructure:"apachedruid.query.cache.delta.num_entries"` + ApachedruidQueryCacheDeltaPutError MetricConfig `mapstructure:"apachedruid.query.cache.delta.put.error"` + ApachedruidQueryCacheDeltaPutOk MetricConfig `mapstructure:"apachedruid.query.cache.delta.put.ok"` + ApachedruidQueryCacheDeltaPutOversized MetricConfig `mapstructure:"apachedruid.query.cache.delta.put.oversized"` + ApachedruidQueryCacheDeltaSizeBytes MetricConfig `mapstructure:"apachedruid.query.cache.delta.size_bytes"` + ApachedruidQueryCacheDeltaTimeouts MetricConfig `mapstructure:"apachedruid.query.cache.delta.timeouts"` + ApachedruidQueryCacheMemcachedDelta MetricConfig `mapstructure:"apachedruid.query.cache.memcached.delta"` + ApachedruidQueryCacheMemcachedTotal MetricConfig `mapstructure:"apachedruid.query.cache.memcached.total"` + ApachedruidQueryCacheTotalAverageBytes MetricConfig `mapstructure:"apachedruid.query.cache.total.average_bytes"` + ApachedruidQueryCacheTotalErrors MetricConfig `mapstructure:"apachedruid.query.cache.total.errors"` + ApachedruidQueryCacheTotalEvictions MetricConfig `mapstructure:"apachedruid.query.cache.total.evictions"` + ApachedruidQueryCacheTotalHitRate MetricConfig `mapstructure:"apachedruid.query.cache.total.hit_rate"` + ApachedruidQueryCacheTotalHits MetricConfig `mapstructure:"apachedruid.query.cache.total.hits"` + ApachedruidQueryCacheTotalMisses MetricConfig `mapstructure:"apachedruid.query.cache.total.misses"` 
+ ApachedruidQueryCacheTotalNumEntries MetricConfig `mapstructure:"apachedruid.query.cache.total.num_entries"` + ApachedruidQueryCacheTotalPutError MetricConfig `mapstructure:"apachedruid.query.cache.total.put.error"` + ApachedruidQueryCacheTotalPutOk MetricConfig `mapstructure:"apachedruid.query.cache.total.put.ok"` + ApachedruidQueryCacheTotalPutOversized MetricConfig `mapstructure:"apachedruid.query.cache.total.put.oversized"` + ApachedruidQueryCacheTotalSizeBytes MetricConfig `mapstructure:"apachedruid.query.cache.total.size_bytes"` + ApachedruidQueryCacheTotalTimeouts MetricConfig `mapstructure:"apachedruid.query.cache.total.timeouts"` + ApachedruidQueryCount MetricConfig `mapstructure:"apachedruid.query.count"` + ApachedruidQueryCPUTime MetricConfig `mapstructure:"apachedruid.query.cpu.time"` + ApachedruidQueryFailedCount MetricConfig `mapstructure:"apachedruid.query.failed.count"` + ApachedruidQueryInterruptedCount MetricConfig `mapstructure:"apachedruid.query.interrupted.count"` + ApachedruidQueryNodeBackpressure MetricConfig `mapstructure:"apachedruid.query.node.backpressure"` + ApachedruidQueryNodeBytes MetricConfig `mapstructure:"apachedruid.query.node.bytes"` + ApachedruidQueryNodeTime MetricConfig `mapstructure:"apachedruid.query.node.time"` + ApachedruidQueryNodeTtfb MetricConfig `mapstructure:"apachedruid.query.node.ttfb"` + ApachedruidQueryPriority MetricConfig `mapstructure:"apachedruid.query.priority"` + ApachedruidQueryRowLimitExceededCount MetricConfig `mapstructure:"apachedruid.query.row_limit.exceeded.count"` + ApachedruidQuerySegmentTime MetricConfig `mapstructure:"apachedruid.query.segment.time"` + ApachedruidQuerySegmentAndCacheTime MetricConfig `mapstructure:"apachedruid.query.segment_and_cache.time"` + ApachedruidQuerySegmentsCount MetricConfig `mapstructure:"apachedruid.query.segments.count"` + ApachedruidQuerySuccessCount MetricConfig `mapstructure:"apachedruid.query.success.count"` + ApachedruidQueryTime MetricConfig `mapstructure:"apachedruid.query.time"` + ApachedruidQueryTimeoutCount MetricConfig `mapstructure:"apachedruid.query.timeout.count"` + ApachedruidQueryWaitTime MetricConfig `mapstructure:"apachedruid.query.wait.time"` + ApachedruidSegmentAddedBytes MetricConfig `mapstructure:"apachedruid.segment.added.bytes"` + ApachedruidSegmentAssignSkippedCount MetricConfig `mapstructure:"apachedruid.segment.assign_skipped.count"` + ApachedruidSegmentAssignedCount MetricConfig `mapstructure:"apachedruid.segment.assigned.count"` + ApachedruidSegmentCompactedBytes MetricConfig `mapstructure:"apachedruid.segment.compacted.bytes"` + ApachedruidSegmentCompactedCount MetricConfig `mapstructure:"apachedruid.segment.compacted.count"` + ApachedruidSegmentCount MetricConfig `mapstructure:"apachedruid.segment.count"` + ApachedruidSegmentDeletedCount MetricConfig `mapstructure:"apachedruid.segment.deleted.count"` + ApachedruidSegmentDropQueueCount MetricConfig `mapstructure:"apachedruid.segment.drop_queue.count"` + ApachedruidSegmentDropSkippedCount MetricConfig `mapstructure:"apachedruid.segment.drop_skipped.count"` + ApachedruidSegmentDroppedCount MetricConfig `mapstructure:"apachedruid.segment.dropped.count"` + ApachedruidSegmentLoadQueueAssigned MetricConfig `mapstructure:"apachedruid.segment.load_queue.assigned"` + ApachedruidSegmentLoadQueueCancelled MetricConfig `mapstructure:"apachedruid.segment.load_queue.cancelled"` + ApachedruidSegmentLoadQueueCount MetricConfig `mapstructure:"apachedruid.segment.load_queue.count"` + ApachedruidSegmentLoadQueueFailed 
MetricConfig `mapstructure:"apachedruid.segment.load_queue.failed"` + ApachedruidSegmentLoadQueueSize MetricConfig `mapstructure:"apachedruid.segment.load_queue.size"` + ApachedruidSegmentLoadQueueSuccess MetricConfig `mapstructure:"apachedruid.segment.load_queue.success"` + ApachedruidSegmentMax MetricConfig `mapstructure:"apachedruid.segment.max"` + ApachedruidSegmentMoveSkippedCount MetricConfig `mapstructure:"apachedruid.segment.move_skipped.count"` + ApachedruidSegmentMovedBytes MetricConfig `mapstructure:"apachedruid.segment.moved.bytes"` + ApachedruidSegmentMovedCount MetricConfig `mapstructure:"apachedruid.segment.moved.count"` + ApachedruidSegmentNukedBytes MetricConfig `mapstructure:"apachedruid.segment.nuked.bytes"` + ApachedruidSegmentOverShadowedCount MetricConfig `mapstructure:"apachedruid.segment.over_shadowed.count"` + ApachedruidSegmentPendingDelete MetricConfig `mapstructure:"apachedruid.segment.pending_delete"` + ApachedruidSegmentRowCountAvg MetricConfig `mapstructure:"apachedruid.segment.row_count.avg"` + ApachedruidSegmentRowCountRangeCount MetricConfig `mapstructure:"apachedruid.segment.row_count.range.count"` + ApachedruidSegmentScanActive MetricConfig `mapstructure:"apachedruid.segment.scan.active"` + ApachedruidSegmentScanPending MetricConfig `mapstructure:"apachedruid.segment.scan.pending"` + ApachedruidSegmentSize MetricConfig `mapstructure:"apachedruid.segment.size"` + ApachedruidSegmentSkipCompactBytes MetricConfig `mapstructure:"apachedruid.segment.skip_compact.bytes"` + ApachedruidSegmentSkipCompactCount MetricConfig `mapstructure:"apachedruid.segment.skip_compact.count"` + ApachedruidSegmentUnavailableCount MetricConfig `mapstructure:"apachedruid.segment.unavailable.count"` + ApachedruidSegmentUnderReplicatedCount MetricConfig `mapstructure:"apachedruid.segment.under_replicated.count"` + ApachedruidSegmentUnneededCount MetricConfig `mapstructure:"apachedruid.segment.unneeded.count"` + ApachedruidSegmentUsed MetricConfig `mapstructure:"apachedruid.segment.used"` + ApachedruidSegmentUsedPercent MetricConfig `mapstructure:"apachedruid.segment.used_percent"` + ApachedruidSegmentWaitCompactBytes MetricConfig `mapstructure:"apachedruid.segment.wait_compact.bytes"` + ApachedruidSegmentWaitCompactCount MetricConfig `mapstructure:"apachedruid.segment.wait_compact.count"` + ApachedruidServerviewInitTime MetricConfig `mapstructure:"apachedruid.serverview.init.time"` + ApachedruidServerviewSyncHealthy MetricConfig `mapstructure:"apachedruid.serverview.sync.healthy"` + ApachedruidServerviewSyncUnstableTime MetricConfig `mapstructure:"apachedruid.serverview.sync.unstable_time"` + ApachedruidSQLQueryBytes MetricConfig `mapstructure:"apachedruid.sql_query.bytes"` + ApachedruidSQLQueryPlanningTimeMs MetricConfig `mapstructure:"apachedruid.sql_query.planning_time_ms"` + ApachedruidSQLQueryTime MetricConfig `mapstructure:"apachedruid.sql_query.time"` + ApachedruidSubqueryByteLimitCount MetricConfig `mapstructure:"apachedruid.subquery.byte_limit.count"` + ApachedruidSubqueryFallbackCount MetricConfig `mapstructure:"apachedruid.subquery.fallback.count"` + ApachedruidSubqueryFallbackInsufficientTypeCount MetricConfig `mapstructure:"apachedruid.subquery.fallback.insufficient_type.count"` + ApachedruidSubqueryFallbackUnknownReasonCount MetricConfig `mapstructure:"apachedruid.subquery.fallback.unknown_reason.count"` + ApachedruidSubqueryRowLimitCount MetricConfig `mapstructure:"apachedruid.subquery.row_limit.count"` + ApachedruidSysCPU MetricConfig 
`mapstructure:"apachedruid.sys.cpu"` + ApachedruidSysDiskQueue MetricConfig `mapstructure:"apachedruid.sys.disk.queue"` + ApachedruidSysDiskReadCount MetricConfig `mapstructure:"apachedruid.sys.disk.read.count"` + ApachedruidSysDiskReadSize MetricConfig `mapstructure:"apachedruid.sys.disk.read.size"` + ApachedruidSysDiskTransferTime MetricConfig `mapstructure:"apachedruid.sys.disk.transfer_time"` + ApachedruidSysDiskWriteCount MetricConfig `mapstructure:"apachedruid.sys.disk.write.count"` + ApachedruidSysDiskWriteSize MetricConfig `mapstructure:"apachedruid.sys.disk.write.size"` + ApachedruidSysFsFilesCount MetricConfig `mapstructure:"apachedruid.sys.fs.files.count"` + ApachedruidSysFsFilesFree MetricConfig `mapstructure:"apachedruid.sys.fs.files.free"` + ApachedruidSysFsMax MetricConfig `mapstructure:"apachedruid.sys.fs.max"` + ApachedruidSysFsUsed MetricConfig `mapstructure:"apachedruid.sys.fs.used"` + ApachedruidSysLa1 MetricConfig `mapstructure:"apachedruid.sys.la.1"` + ApachedruidSysLa15 MetricConfig `mapstructure:"apachedruid.sys.la.15"` + ApachedruidSysLa5 MetricConfig `mapstructure:"apachedruid.sys.la.5"` + ApachedruidSysMemFree MetricConfig `mapstructure:"apachedruid.sys.mem.free"` + ApachedruidSysMemMax MetricConfig `mapstructure:"apachedruid.sys.mem.max"` + ApachedruidSysMemUsed MetricConfig `mapstructure:"apachedruid.sys.mem.used"` + ApachedruidSysNetReadDropped MetricConfig `mapstructure:"apachedruid.sys.net.read.dropped"` + ApachedruidSysNetReadErrors MetricConfig `mapstructure:"apachedruid.sys.net.read.errors"` + ApachedruidSysNetReadPackets MetricConfig `mapstructure:"apachedruid.sys.net.read.packets"` + ApachedruidSysNetReadSize MetricConfig `mapstructure:"apachedruid.sys.net.read.size"` + ApachedruidSysNetWriteCollisions MetricConfig `mapstructure:"apachedruid.sys.net.write.collisions"` + ApachedruidSysNetWriteErrors MetricConfig `mapstructure:"apachedruid.sys.net.write.errors"` + ApachedruidSysNetWritePackets MetricConfig `mapstructure:"apachedruid.sys.net.write.packets"` + ApachedruidSysNetWriteSize MetricConfig `mapstructure:"apachedruid.sys.net.write.size"` + ApachedruidSysStorageUsed MetricConfig `mapstructure:"apachedruid.sys.storage.used"` + ApachedruidSysSwapFree MetricConfig `mapstructure:"apachedruid.sys.swap.free"` + ApachedruidSysSwapMax MetricConfig `mapstructure:"apachedruid.sys.swap.max"` + ApachedruidSysSwapPageIn MetricConfig `mapstructure:"apachedruid.sys.swap.page_in"` + ApachedruidSysSwapPageOut MetricConfig `mapstructure:"apachedruid.sys.swap.page_out"` + ApachedruidSysTcpv4ActiveOpens MetricConfig `mapstructure:"apachedruid.sys.tcpv4.active_opens"` + ApachedruidSysTcpv4AttemptFails MetricConfig `mapstructure:"apachedruid.sys.tcpv4.attempt_fails"` + ApachedruidSysTcpv4EstabResets MetricConfig `mapstructure:"apachedruid.sys.tcpv4.estab_resets"` + ApachedruidSysTcpv4InErrs MetricConfig `mapstructure:"apachedruid.sys.tcpv4.in.errs"` + ApachedruidSysTcpv4InSegs MetricConfig `mapstructure:"apachedruid.sys.tcpv4.in.segs"` + ApachedruidSysTcpv4OutRsts MetricConfig `mapstructure:"apachedruid.sys.tcpv4.out.rsts"` + ApachedruidSysTcpv4OutSegs MetricConfig `mapstructure:"apachedruid.sys.tcpv4.out.segs"` + ApachedruidSysTcpv4PassiveOpens MetricConfig `mapstructure:"apachedruid.sys.tcpv4.passive_opens"` + ApachedruidSysTcpv4RetransSegs MetricConfig `mapstructure:"apachedruid.sys.tcpv4.retrans.segs"` + ApachedruidSysUptime MetricConfig `mapstructure:"apachedruid.sys.uptime"` + ApachedruidTaskActionBatchAttempts MetricConfig 
`mapstructure:"apachedruid.task.action.batch.attempts"` + ApachedruidTaskActionBatchQueueTime MetricConfig `mapstructure:"apachedruid.task.action.batch.queue_time"` + ApachedruidTaskActionBatchRunTime MetricConfig `mapstructure:"apachedruid.task.action.batch.run_time"` + ApachedruidTaskActionBatchSize MetricConfig `mapstructure:"apachedruid.task.action.batch.size"` + ApachedruidTaskActionFailedCount MetricConfig `mapstructure:"apachedruid.task.action.failed.count"` + ApachedruidTaskActionLogTime MetricConfig `mapstructure:"apachedruid.task.action.log.time"` + ApachedruidTaskActionRunTime MetricConfig `mapstructure:"apachedruid.task.action.run.time"` + ApachedruidTaskActionSuccessCount MetricConfig `mapstructure:"apachedruid.task.action.success.count"` + ApachedruidTaskFailedCount MetricConfig `mapstructure:"apachedruid.task.failed.count"` + ApachedruidTaskPendingCount MetricConfig `mapstructure:"apachedruid.task.pending.count"` + ApachedruidTaskPendingTime MetricConfig `mapstructure:"apachedruid.task.pending.time"` + ApachedruidTaskRunTime MetricConfig `mapstructure:"apachedruid.task.run.time"` + ApachedruidTaskRunningCount MetricConfig `mapstructure:"apachedruid.task.running.count"` + ApachedruidTaskSegmentAvailabilityWaitTime MetricConfig `mapstructure:"apachedruid.task.segment_availability.wait.time"` + ApachedruidTaskSuccessCount MetricConfig `mapstructure:"apachedruid.task.success.count"` + ApachedruidTaskWaitingCount MetricConfig `mapstructure:"apachedruid.task.waiting.count"` + ApachedruidTaskSlotBlacklistedCount MetricConfig `mapstructure:"apachedruid.task_slot.blacklisted.count"` + ApachedruidTaskSlotIdleCount MetricConfig `mapstructure:"apachedruid.task_slot.idle.count"` + ApachedruidTaskSlotLazyCount MetricConfig `mapstructure:"apachedruid.task_slot.lazy.count"` + ApachedruidTaskSlotTotalCount MetricConfig `mapstructure:"apachedruid.task_slot.total.count"` + ApachedruidTaskSlotUsedCount MetricConfig `mapstructure:"apachedruid.task_slot.used.count"` + ApachedruidTierHistoricalCount MetricConfig `mapstructure:"apachedruid.tier.historical.count"` + ApachedruidTierReplicationFactor MetricConfig `mapstructure:"apachedruid.tier.replication.factor"` + ApachedruidTierRequiredCapacity MetricConfig `mapstructure:"apachedruid.tier.required.capacity"` + ApachedruidTierTotalCapacity MetricConfig `mapstructure:"apachedruid.tier.total.capacity"` + ApachedruidWorkerTaskFailedCount MetricConfig `mapstructure:"apachedruid.worker.task.failed.count"` + ApachedruidWorkerTaskSuccessCount MetricConfig `mapstructure:"apachedruid.worker.task.success.count"` + ApachedruidWorkerTaskSlotIdleCount MetricConfig `mapstructure:"apachedruid.worker.task_slot.idle.count"` + ApachedruidWorkerTaskSlotTotalCount MetricConfig `mapstructure:"apachedruid.worker.task_slot.total.count"` + ApachedruidWorkerTaskSlotUsedCount MetricConfig `mapstructure:"apachedruid.worker.task_slot.used.count"` + ApachedruidZkConnected MetricConfig `mapstructure:"apachedruid.zk.connected"` + ApachedruidZkReconnectTime MetricConfig `mapstructure:"apachedruid.zk.reconnect.time"` +} + +func DefaultMetricsConfig() MetricsConfig { + return MetricsConfig{ + ApachedruidCompactSegmentAnalyzerFetchAndProcessMillis: MetricConfig{ + Enabled: true, + }, + ApachedruidCompactTaskCount: MetricConfig{ + Enabled: true, + }, + ApachedruidCompactTaskAvailableSlotCount: MetricConfig{ + Enabled: true, + }, + ApachedruidCompactTaskMaxSlotCount: MetricConfig{ + Enabled: true, + }, + ApachedruidCoordinatorGlobalTime: MetricConfig{ + Enabled: true, + }, + 
ApachedruidCoordinatorTime: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestBytesReceived: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestCount: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestEventsBuffered: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestEventsDuplicate: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestEventsMessageGap: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestEventsProcessed: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestEventsProcessedWithError: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestEventsThrownAway: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestEventsUnparseable: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestHandoffCount: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestHandoffFailed: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestHandoffTime: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestInputBytes: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestKafkaAvgLag: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestKafkaLag: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestKafkaMaxLag: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestKafkaPartitionLag: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestKinesisAvgLagTime: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestKinesisLagTime: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestKinesisMaxLagTime: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestKinesisPartitionLagTime: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestMergeCPU: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestMergeTime: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestNoticesQueueSize: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestNoticesTime: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestPauseTime: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestPersistsBackPressure: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestPersistsCount: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestPersistsCPU: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestPersistsFailed: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestPersistsTime: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestRowsOutput: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestSegmentsCount: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestShuffleBytes: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestShuffleRequests: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestSinkCount: MetricConfig{ + Enabled: true, + }, + ApachedruidIngestTombstonesCount: MetricConfig{ + Enabled: true, + }, + ApachedruidIntervalCompactedCount: MetricConfig{ + Enabled: true, + }, + ApachedruidIntervalSkipCompactCount: MetricConfig{ + Enabled: true, + }, + ApachedruidIntervalWaitCompactCount: MetricConfig{ + Enabled: true, + }, + ApachedruidJettyNumOpenConnections: MetricConfig{ + Enabled: true, + }, + ApachedruidJettyThreadPoolBusy: MetricConfig{ + Enabled: true, + }, + ApachedruidJettyThreadPoolIdle: MetricConfig{ + Enabled: true, + }, + ApachedruidJettyThreadPoolIsLowOnThreads: MetricConfig{ + Enabled: true, + }, + ApachedruidJettyThreadPoolMax: MetricConfig{ + Enabled: true, + }, + ApachedruidJettyThreadPoolMin: MetricConfig{ + Enabled: true, + }, + ApachedruidJettyThreadPoolQueueSize: MetricConfig{ + Enabled: true, + }, + ApachedruidJettyThreadPoolTotal: MetricConfig{ + Enabled: true, + }, + ApachedruidJvmBufferpoolCapacity: MetricConfig{ + Enabled: true, + }, + 
ApachedruidJvmBufferpoolCount: MetricConfig{ + Enabled: true, + }, + ApachedruidJvmBufferpoolUsed: MetricConfig{ + Enabled: true, + }, + ApachedruidJvmGcCount: MetricConfig{ + Enabled: true, + }, + ApachedruidJvmGcCPU: MetricConfig{ + Enabled: true, + }, + ApachedruidJvmMemCommitted: MetricConfig{ + Enabled: true, + }, + ApachedruidJvmMemInit: MetricConfig{ + Enabled: true, + }, + ApachedruidJvmMemMax: MetricConfig{ + Enabled: true, + }, + ApachedruidJvmMemUsed: MetricConfig{ + Enabled: true, + }, + ApachedruidJvmPoolCommitted: MetricConfig{ + Enabled: true, + }, + ApachedruidJvmPoolInit: MetricConfig{ + Enabled: true, + }, + ApachedruidJvmPoolMax: MetricConfig{ + Enabled: true, + }, + ApachedruidJvmPoolUsed: MetricConfig{ + Enabled: true, + }, + ApachedruidKillPendingSegmentsCount: MetricConfig{ + Enabled: true, + }, + ApachedruidKillTaskCount: MetricConfig{ + Enabled: true, + }, + ApachedruidKillTaskAvailableSlotCount: MetricConfig{ + Enabled: true, + }, + ApachedruidKillTaskMaxSlotCount: MetricConfig{ + Enabled: true, + }, + ApachedruidMergeBufferPendingRequests: MetricConfig{ + Enabled: true, + }, + ApachedruidMetadataKillAuditCount: MetricConfig{ + Enabled: true, + }, + ApachedruidMetadataKillCompactionCount: MetricConfig{ + Enabled: true, + }, + ApachedruidMetadataKillDatasourceCount: MetricConfig{ + Enabled: true, + }, + ApachedruidMetadataKillRuleCount: MetricConfig{ + Enabled: true, + }, + ApachedruidMetadataKillSupervisorCount: MetricConfig{ + Enabled: true, + }, + ApachedruidMetadatacacheInitTime: MetricConfig{ + Enabled: true, + }, + ApachedruidMetadatacacheRefreshCount: MetricConfig{ + Enabled: true, + }, + ApachedruidMetadatacacheRefreshTime: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryByteLimitExceededCount: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryBytes: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryCacheDeltaAverageBytes: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryCacheDeltaErrors: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryCacheDeltaEvictions: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryCacheDeltaHitRate: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryCacheDeltaHits: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryCacheDeltaMisses: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryCacheDeltaNumEntries: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryCacheDeltaPutError: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryCacheDeltaPutOk: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryCacheDeltaPutOversized: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryCacheDeltaSizeBytes: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryCacheDeltaTimeouts: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryCacheMemcachedDelta: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryCacheMemcachedTotal: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryCacheTotalAverageBytes: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryCacheTotalErrors: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryCacheTotalEvictions: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryCacheTotalHitRate: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryCacheTotalHits: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryCacheTotalMisses: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryCacheTotalNumEntries: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryCacheTotalPutError: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryCacheTotalPutOk: MetricConfig{ + Enabled: true, 
+ }, + ApachedruidQueryCacheTotalPutOversized: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryCacheTotalSizeBytes: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryCacheTotalTimeouts: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryCount: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryCPUTime: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryFailedCount: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryInterruptedCount: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryNodeBackpressure: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryNodeBytes: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryNodeTime: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryNodeTtfb: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryPriority: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryRowLimitExceededCount: MetricConfig{ + Enabled: true, + }, + ApachedruidQuerySegmentTime: MetricConfig{ + Enabled: true, + }, + ApachedruidQuerySegmentAndCacheTime: MetricConfig{ + Enabled: true, + }, + ApachedruidQuerySegmentsCount: MetricConfig{ + Enabled: true, + }, + ApachedruidQuerySuccessCount: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryTime: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryTimeoutCount: MetricConfig{ + Enabled: true, + }, + ApachedruidQueryWaitTime: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentAddedBytes: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentAssignSkippedCount: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentAssignedCount: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentCompactedBytes: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentCompactedCount: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentCount: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentDeletedCount: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentDropQueueCount: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentDropSkippedCount: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentDroppedCount: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentLoadQueueAssigned: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentLoadQueueCancelled: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentLoadQueueCount: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentLoadQueueFailed: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentLoadQueueSize: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentLoadQueueSuccess: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentMax: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentMoveSkippedCount: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentMovedBytes: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentMovedCount: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentNukedBytes: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentOverShadowedCount: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentPendingDelete: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentRowCountAvg: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentRowCountRangeCount: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentScanActive: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentScanPending: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentSize: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentSkipCompactBytes: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentSkipCompactCount: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentUnavailableCount: MetricConfig{ + 
Enabled: true, + }, + ApachedruidSegmentUnderReplicatedCount: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentUnneededCount: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentUsed: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentUsedPercent: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentWaitCompactBytes: MetricConfig{ + Enabled: true, + }, + ApachedruidSegmentWaitCompactCount: MetricConfig{ + Enabled: true, + }, + ApachedruidServerviewInitTime: MetricConfig{ + Enabled: true, + }, + ApachedruidServerviewSyncHealthy: MetricConfig{ + Enabled: true, + }, + ApachedruidServerviewSyncUnstableTime: MetricConfig{ + Enabled: true, + }, + ApachedruidSQLQueryBytes: MetricConfig{ + Enabled: true, + }, + ApachedruidSQLQueryPlanningTimeMs: MetricConfig{ + Enabled: true, + }, + ApachedruidSQLQueryTime: MetricConfig{ + Enabled: true, + }, + ApachedruidSubqueryByteLimitCount: MetricConfig{ + Enabled: true, + }, + ApachedruidSubqueryFallbackCount: MetricConfig{ + Enabled: true, + }, + ApachedruidSubqueryFallbackInsufficientTypeCount: MetricConfig{ + Enabled: true, + }, + ApachedruidSubqueryFallbackUnknownReasonCount: MetricConfig{ + Enabled: true, + }, + ApachedruidSubqueryRowLimitCount: MetricConfig{ + Enabled: true, + }, + ApachedruidSysCPU: MetricConfig{ + Enabled: true, + }, + ApachedruidSysDiskQueue: MetricConfig{ + Enabled: true, + }, + ApachedruidSysDiskReadCount: MetricConfig{ + Enabled: true, + }, + ApachedruidSysDiskReadSize: MetricConfig{ + Enabled: true, + }, + ApachedruidSysDiskTransferTime: MetricConfig{ + Enabled: true, + }, + ApachedruidSysDiskWriteCount: MetricConfig{ + Enabled: true, + }, + ApachedruidSysDiskWriteSize: MetricConfig{ + Enabled: true, + }, + ApachedruidSysFsFilesCount: MetricConfig{ + Enabled: true, + }, + ApachedruidSysFsFilesFree: MetricConfig{ + Enabled: true, + }, + ApachedruidSysFsMax: MetricConfig{ + Enabled: true, + }, + ApachedruidSysFsUsed: MetricConfig{ + Enabled: true, + }, + ApachedruidSysLa1: MetricConfig{ + Enabled: true, + }, + ApachedruidSysLa15: MetricConfig{ + Enabled: true, + }, + ApachedruidSysLa5: MetricConfig{ + Enabled: true, + }, + ApachedruidSysMemFree: MetricConfig{ + Enabled: true, + }, + ApachedruidSysMemMax: MetricConfig{ + Enabled: true, + }, + ApachedruidSysMemUsed: MetricConfig{ + Enabled: true, + }, + ApachedruidSysNetReadDropped: MetricConfig{ + Enabled: true, + }, + ApachedruidSysNetReadErrors: MetricConfig{ + Enabled: true, + }, + ApachedruidSysNetReadPackets: MetricConfig{ + Enabled: true, + }, + ApachedruidSysNetReadSize: MetricConfig{ + Enabled: true, + }, + ApachedruidSysNetWriteCollisions: MetricConfig{ + Enabled: true, + }, + ApachedruidSysNetWriteErrors: MetricConfig{ + Enabled: true, + }, + ApachedruidSysNetWritePackets: MetricConfig{ + Enabled: true, + }, + ApachedruidSysNetWriteSize: MetricConfig{ + Enabled: true, + }, + ApachedruidSysStorageUsed: MetricConfig{ + Enabled: true, + }, + ApachedruidSysSwapFree: MetricConfig{ + Enabled: true, + }, + ApachedruidSysSwapMax: MetricConfig{ + Enabled: true, + }, + ApachedruidSysSwapPageIn: MetricConfig{ + Enabled: true, + }, + ApachedruidSysSwapPageOut: MetricConfig{ + Enabled: true, + }, + ApachedruidSysTcpv4ActiveOpens: MetricConfig{ + Enabled: true, + }, + ApachedruidSysTcpv4AttemptFails: MetricConfig{ + Enabled: true, + }, + ApachedruidSysTcpv4EstabResets: MetricConfig{ + Enabled: true, + }, + ApachedruidSysTcpv4InErrs: MetricConfig{ + Enabled: true, + }, + ApachedruidSysTcpv4InSegs: MetricConfig{ + Enabled: true, + }, + ApachedruidSysTcpv4OutRsts: 
MetricConfig{ + Enabled: true, + }, + ApachedruidSysTcpv4OutSegs: MetricConfig{ + Enabled: true, + }, + ApachedruidSysTcpv4PassiveOpens: MetricConfig{ + Enabled: true, + }, + ApachedruidSysTcpv4RetransSegs: MetricConfig{ + Enabled: true, + }, + ApachedruidSysUptime: MetricConfig{ + Enabled: true, + }, + ApachedruidTaskActionBatchAttempts: MetricConfig{ + Enabled: true, + }, + ApachedruidTaskActionBatchQueueTime: MetricConfig{ + Enabled: true, + }, + ApachedruidTaskActionBatchRunTime: MetricConfig{ + Enabled: true, + }, + ApachedruidTaskActionBatchSize: MetricConfig{ + Enabled: true, + }, + ApachedruidTaskActionFailedCount: MetricConfig{ + Enabled: true, + }, + ApachedruidTaskActionLogTime: MetricConfig{ + Enabled: true, + }, + ApachedruidTaskActionRunTime: MetricConfig{ + Enabled: true, + }, + ApachedruidTaskActionSuccessCount: MetricConfig{ + Enabled: true, + }, + ApachedruidTaskFailedCount: MetricConfig{ + Enabled: true, + }, + ApachedruidTaskPendingCount: MetricConfig{ + Enabled: true, + }, + ApachedruidTaskPendingTime: MetricConfig{ + Enabled: true, + }, + ApachedruidTaskRunTime: MetricConfig{ + Enabled: true, + }, + ApachedruidTaskRunningCount: MetricConfig{ + Enabled: true, + }, + ApachedruidTaskSegmentAvailabilityWaitTime: MetricConfig{ + Enabled: true, + }, + ApachedruidTaskSuccessCount: MetricConfig{ + Enabled: true, + }, + ApachedruidTaskWaitingCount: MetricConfig{ + Enabled: true, + }, + ApachedruidTaskSlotBlacklistedCount: MetricConfig{ + Enabled: true, + }, + ApachedruidTaskSlotIdleCount: MetricConfig{ + Enabled: true, + }, + ApachedruidTaskSlotLazyCount: MetricConfig{ + Enabled: true, + }, + ApachedruidTaskSlotTotalCount: MetricConfig{ + Enabled: true, + }, + ApachedruidTaskSlotUsedCount: MetricConfig{ + Enabled: true, + }, + ApachedruidTierHistoricalCount: MetricConfig{ + Enabled: true, + }, + ApachedruidTierReplicationFactor: MetricConfig{ + Enabled: true, + }, + ApachedruidTierRequiredCapacity: MetricConfig{ + Enabled: true, + }, + ApachedruidTierTotalCapacity: MetricConfig{ + Enabled: true, + }, + ApachedruidWorkerTaskFailedCount: MetricConfig{ + Enabled: true, + }, + ApachedruidWorkerTaskSuccessCount: MetricConfig{ + Enabled: true, + }, + ApachedruidWorkerTaskSlotIdleCount: MetricConfig{ + Enabled: true, + }, + ApachedruidWorkerTaskSlotTotalCount: MetricConfig{ + Enabled: true, + }, + ApachedruidWorkerTaskSlotUsedCount: MetricConfig{ + Enabled: true, + }, + ApachedruidZkConnected: MetricConfig{ + Enabled: true, + }, + ApachedruidZkReconnectTime: MetricConfig{ + Enabled: true, + }, + } +} + +// ResourceAttributeConfig provides common config for a particular resource attribute. +type ResourceAttributeConfig struct { + Enabled bool `mapstructure:"enabled"` + + enabledSetByUser bool +} + +func (rac *ResourceAttributeConfig) Unmarshal(parser *confmap.Conf) error { + if parser == nil { + return nil + } + err := parser.Unmarshal(rac) + if err != nil { + return err + } + rac.enabledSetByUser = parser.IsSet("enabled") + return nil +} + +// ResourceAttributesConfig provides config for apachedruid resource attributes. 
+type ResourceAttributesConfig struct { + ApachedruidClusterName ResourceAttributeConfig `mapstructure:"apachedruid.cluster.name"` + ApachedruidNodeHost ResourceAttributeConfig `mapstructure:"apachedruid.node.host"` + ApachedruidNodeService ResourceAttributeConfig `mapstructure:"apachedruid.node.service"` +} + +func DefaultResourceAttributesConfig() ResourceAttributesConfig { + return ResourceAttributesConfig{ + ApachedruidClusterName: ResourceAttributeConfig{ + Enabled: true, + }, + ApachedruidNodeHost: ResourceAttributeConfig{ + Enabled: true, + }, + ApachedruidNodeService: ResourceAttributeConfig{ + Enabled: true, + }, + } +} + +// MetricsBuilderConfig is a configuration for apachedruid metrics builder. +type MetricsBuilderConfig struct { + Metrics MetricsConfig `mapstructure:"metrics"` + ResourceAttributes ResourceAttributesConfig `mapstructure:"resource_attributes"` +} + +func DefaultMetricsBuilderConfig() MetricsBuilderConfig { + return MetricsBuilderConfig{ + Metrics: DefaultMetricsConfig(), + ResourceAttributes: DefaultResourceAttributesConfig(), + } +} diff --git a/receiver/apachedruidreceiver/internal/metadata/generated_config_test.go b/receiver/apachedruidreceiver/internal/metadata/generated_config_test.go new file mode 100644 index 0000000000000..40312148ee7c1 --- /dev/null +++ b/receiver/apachedruidreceiver/internal/metadata/generated_config_test.go @@ -0,0 +1,606 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "path/filepath" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap/confmaptest" +) + +func TestMetricsBuilderConfig(t *testing.T) { + tests := []struct { + name string + want MetricsBuilderConfig + }{ + { + name: "default", + want: DefaultMetricsBuilderConfig(), + }, + { + name: "all_set", + want: MetricsBuilderConfig{ + Metrics: MetricsConfig{ + ApachedruidCompactSegmentAnalyzerFetchAndProcessMillis: MetricConfig{Enabled: true}, + ApachedruidCompactTaskCount: MetricConfig{Enabled: true}, + ApachedruidCompactTaskAvailableSlotCount: MetricConfig{Enabled: true}, + ApachedruidCompactTaskMaxSlotCount: MetricConfig{Enabled: true}, + ApachedruidCoordinatorGlobalTime: MetricConfig{Enabled: true}, + ApachedruidCoordinatorTime: MetricConfig{Enabled: true}, + ApachedruidIngestBytesReceived: MetricConfig{Enabled: true}, + ApachedruidIngestCount: MetricConfig{Enabled: true}, + ApachedruidIngestEventsBuffered: MetricConfig{Enabled: true}, + ApachedruidIngestEventsDuplicate: MetricConfig{Enabled: true}, + ApachedruidIngestEventsMessageGap: MetricConfig{Enabled: true}, + ApachedruidIngestEventsProcessed: MetricConfig{Enabled: true}, + ApachedruidIngestEventsProcessedWithError: MetricConfig{Enabled: true}, + ApachedruidIngestEventsThrownAway: MetricConfig{Enabled: true}, + ApachedruidIngestEventsUnparseable: MetricConfig{Enabled: true}, + ApachedruidIngestHandoffCount: MetricConfig{Enabled: true}, + ApachedruidIngestHandoffFailed: MetricConfig{Enabled: true}, + ApachedruidIngestHandoffTime: MetricConfig{Enabled: true}, + ApachedruidIngestInputBytes: MetricConfig{Enabled: true}, + ApachedruidIngestKafkaAvgLag: MetricConfig{Enabled: true}, + ApachedruidIngestKafkaLag: MetricConfig{Enabled: true}, + ApachedruidIngestKafkaMaxLag: MetricConfig{Enabled: true}, + ApachedruidIngestKafkaPartitionLag: MetricConfig{Enabled: true}, + ApachedruidIngestKinesisAvgLagTime: MetricConfig{Enabled: true}, + 
ApachedruidIngestKinesisLagTime: MetricConfig{Enabled: true}, + ApachedruidIngestKinesisMaxLagTime: MetricConfig{Enabled: true}, + ApachedruidIngestKinesisPartitionLagTime: MetricConfig{Enabled: true}, + ApachedruidIngestMergeCPU: MetricConfig{Enabled: true}, + ApachedruidIngestMergeTime: MetricConfig{Enabled: true}, + ApachedruidIngestNoticesQueueSize: MetricConfig{Enabled: true}, + ApachedruidIngestNoticesTime: MetricConfig{Enabled: true}, + ApachedruidIngestPauseTime: MetricConfig{Enabled: true}, + ApachedruidIngestPersistsBackPressure: MetricConfig{Enabled: true}, + ApachedruidIngestPersistsCount: MetricConfig{Enabled: true}, + ApachedruidIngestPersistsCPU: MetricConfig{Enabled: true}, + ApachedruidIngestPersistsFailed: MetricConfig{Enabled: true}, + ApachedruidIngestPersistsTime: MetricConfig{Enabled: true}, + ApachedruidIngestRowsOutput: MetricConfig{Enabled: true}, + ApachedruidIngestSegmentsCount: MetricConfig{Enabled: true}, + ApachedruidIngestShuffleBytes: MetricConfig{Enabled: true}, + ApachedruidIngestShuffleRequests: MetricConfig{Enabled: true}, + ApachedruidIngestSinkCount: MetricConfig{Enabled: true}, + ApachedruidIngestTombstonesCount: MetricConfig{Enabled: true}, + ApachedruidIntervalCompactedCount: MetricConfig{Enabled: true}, + ApachedruidIntervalSkipCompactCount: MetricConfig{Enabled: true}, + ApachedruidIntervalWaitCompactCount: MetricConfig{Enabled: true}, + ApachedruidJettyNumOpenConnections: MetricConfig{Enabled: true}, + ApachedruidJettyThreadPoolBusy: MetricConfig{Enabled: true}, + ApachedruidJettyThreadPoolIdle: MetricConfig{Enabled: true}, + ApachedruidJettyThreadPoolIsLowOnThreads: MetricConfig{Enabled: true}, + ApachedruidJettyThreadPoolMax: MetricConfig{Enabled: true}, + ApachedruidJettyThreadPoolMin: MetricConfig{Enabled: true}, + ApachedruidJettyThreadPoolQueueSize: MetricConfig{Enabled: true}, + ApachedruidJettyThreadPoolTotal: MetricConfig{Enabled: true}, + ApachedruidJvmBufferpoolCapacity: MetricConfig{Enabled: true}, + ApachedruidJvmBufferpoolCount: MetricConfig{Enabled: true}, + ApachedruidJvmBufferpoolUsed: MetricConfig{Enabled: true}, + ApachedruidJvmGcCount: MetricConfig{Enabled: true}, + ApachedruidJvmGcCPU: MetricConfig{Enabled: true}, + ApachedruidJvmMemCommitted: MetricConfig{Enabled: true}, + ApachedruidJvmMemInit: MetricConfig{Enabled: true}, + ApachedruidJvmMemMax: MetricConfig{Enabled: true}, + ApachedruidJvmMemUsed: MetricConfig{Enabled: true}, + ApachedruidJvmPoolCommitted: MetricConfig{Enabled: true}, + ApachedruidJvmPoolInit: MetricConfig{Enabled: true}, + ApachedruidJvmPoolMax: MetricConfig{Enabled: true}, + ApachedruidJvmPoolUsed: MetricConfig{Enabled: true}, + ApachedruidKillPendingSegmentsCount: MetricConfig{Enabled: true}, + ApachedruidKillTaskCount: MetricConfig{Enabled: true}, + ApachedruidKillTaskAvailableSlotCount: MetricConfig{Enabled: true}, + ApachedruidKillTaskMaxSlotCount: MetricConfig{Enabled: true}, + ApachedruidMergeBufferPendingRequests: MetricConfig{Enabled: true}, + ApachedruidMetadataKillAuditCount: MetricConfig{Enabled: true}, + ApachedruidMetadataKillCompactionCount: MetricConfig{Enabled: true}, + ApachedruidMetadataKillDatasourceCount: MetricConfig{Enabled: true}, + ApachedruidMetadataKillRuleCount: MetricConfig{Enabled: true}, + ApachedruidMetadataKillSupervisorCount: MetricConfig{Enabled: true}, + ApachedruidMetadatacacheInitTime: MetricConfig{Enabled: true}, + ApachedruidMetadatacacheRefreshCount: MetricConfig{Enabled: true}, + ApachedruidMetadatacacheRefreshTime: MetricConfig{Enabled: true}, + 
ApachedruidQueryByteLimitExceededCount: MetricConfig{Enabled: true}, + ApachedruidQueryBytes: MetricConfig{Enabled: true}, + ApachedruidQueryCacheDeltaAverageBytes: MetricConfig{Enabled: true}, + ApachedruidQueryCacheDeltaErrors: MetricConfig{Enabled: true}, + ApachedruidQueryCacheDeltaEvictions: MetricConfig{Enabled: true}, + ApachedruidQueryCacheDeltaHitRate: MetricConfig{Enabled: true}, + ApachedruidQueryCacheDeltaHits: MetricConfig{Enabled: true}, + ApachedruidQueryCacheDeltaMisses: MetricConfig{Enabled: true}, + ApachedruidQueryCacheDeltaNumEntries: MetricConfig{Enabled: true}, + ApachedruidQueryCacheDeltaPutError: MetricConfig{Enabled: true}, + ApachedruidQueryCacheDeltaPutOk: MetricConfig{Enabled: true}, + ApachedruidQueryCacheDeltaPutOversized: MetricConfig{Enabled: true}, + ApachedruidQueryCacheDeltaSizeBytes: MetricConfig{Enabled: true}, + ApachedruidQueryCacheDeltaTimeouts: MetricConfig{Enabled: true}, + ApachedruidQueryCacheMemcachedDelta: MetricConfig{Enabled: true}, + ApachedruidQueryCacheMemcachedTotal: MetricConfig{Enabled: true}, + ApachedruidQueryCacheTotalAverageBytes: MetricConfig{Enabled: true}, + ApachedruidQueryCacheTotalErrors: MetricConfig{Enabled: true}, + ApachedruidQueryCacheTotalEvictions: MetricConfig{Enabled: true}, + ApachedruidQueryCacheTotalHitRate: MetricConfig{Enabled: true}, + ApachedruidQueryCacheTotalHits: MetricConfig{Enabled: true}, + ApachedruidQueryCacheTotalMisses: MetricConfig{Enabled: true}, + ApachedruidQueryCacheTotalNumEntries: MetricConfig{Enabled: true}, + ApachedruidQueryCacheTotalPutError: MetricConfig{Enabled: true}, + ApachedruidQueryCacheTotalPutOk: MetricConfig{Enabled: true}, + ApachedruidQueryCacheTotalPutOversized: MetricConfig{Enabled: true}, + ApachedruidQueryCacheTotalSizeBytes: MetricConfig{Enabled: true}, + ApachedruidQueryCacheTotalTimeouts: MetricConfig{Enabled: true}, + ApachedruidQueryCount: MetricConfig{Enabled: true}, + ApachedruidQueryCPUTime: MetricConfig{Enabled: true}, + ApachedruidQueryFailedCount: MetricConfig{Enabled: true}, + ApachedruidQueryInterruptedCount: MetricConfig{Enabled: true}, + ApachedruidQueryNodeBackpressure: MetricConfig{Enabled: true}, + ApachedruidQueryNodeBytes: MetricConfig{Enabled: true}, + ApachedruidQueryNodeTime: MetricConfig{Enabled: true}, + ApachedruidQueryNodeTtfb: MetricConfig{Enabled: true}, + ApachedruidQueryPriority: MetricConfig{Enabled: true}, + ApachedruidQueryRowLimitExceededCount: MetricConfig{Enabled: true}, + ApachedruidQuerySegmentTime: MetricConfig{Enabled: true}, + ApachedruidQuerySegmentAndCacheTime: MetricConfig{Enabled: true}, + ApachedruidQuerySegmentsCount: MetricConfig{Enabled: true}, + ApachedruidQuerySuccessCount: MetricConfig{Enabled: true}, + ApachedruidQueryTime: MetricConfig{Enabled: true}, + ApachedruidQueryTimeoutCount: MetricConfig{Enabled: true}, + ApachedruidQueryWaitTime: MetricConfig{Enabled: true}, + ApachedruidSegmentAddedBytes: MetricConfig{Enabled: true}, + ApachedruidSegmentAssignSkippedCount: MetricConfig{Enabled: true}, + ApachedruidSegmentAssignedCount: MetricConfig{Enabled: true}, + ApachedruidSegmentCompactedBytes: MetricConfig{Enabled: true}, + ApachedruidSegmentCompactedCount: MetricConfig{Enabled: true}, + ApachedruidSegmentCount: MetricConfig{Enabled: true}, + ApachedruidSegmentDeletedCount: MetricConfig{Enabled: true}, + ApachedruidSegmentDropQueueCount: MetricConfig{Enabled: true}, + ApachedruidSegmentDropSkippedCount: MetricConfig{Enabled: true}, + ApachedruidSegmentDroppedCount: MetricConfig{Enabled: true}, + 
ApachedruidSegmentLoadQueueAssigned: MetricConfig{Enabled: true}, + ApachedruidSegmentLoadQueueCancelled: MetricConfig{Enabled: true}, + ApachedruidSegmentLoadQueueCount: MetricConfig{Enabled: true}, + ApachedruidSegmentLoadQueueFailed: MetricConfig{Enabled: true}, + ApachedruidSegmentLoadQueueSize: MetricConfig{Enabled: true}, + ApachedruidSegmentLoadQueueSuccess: MetricConfig{Enabled: true}, + ApachedruidSegmentMax: MetricConfig{Enabled: true}, + ApachedruidSegmentMoveSkippedCount: MetricConfig{Enabled: true}, + ApachedruidSegmentMovedBytes: MetricConfig{Enabled: true}, + ApachedruidSegmentMovedCount: MetricConfig{Enabled: true}, + ApachedruidSegmentNukedBytes: MetricConfig{Enabled: true}, + ApachedruidSegmentOverShadowedCount: MetricConfig{Enabled: true}, + ApachedruidSegmentPendingDelete: MetricConfig{Enabled: true}, + ApachedruidSegmentRowCountAvg: MetricConfig{Enabled: true}, + ApachedruidSegmentRowCountRangeCount: MetricConfig{Enabled: true}, + ApachedruidSegmentScanActive: MetricConfig{Enabled: true}, + ApachedruidSegmentScanPending: MetricConfig{Enabled: true}, + ApachedruidSegmentSize: MetricConfig{Enabled: true}, + ApachedruidSegmentSkipCompactBytes: MetricConfig{Enabled: true}, + ApachedruidSegmentSkipCompactCount: MetricConfig{Enabled: true}, + ApachedruidSegmentUnavailableCount: MetricConfig{Enabled: true}, + ApachedruidSegmentUnderReplicatedCount: MetricConfig{Enabled: true}, + ApachedruidSegmentUnneededCount: MetricConfig{Enabled: true}, + ApachedruidSegmentUsed: MetricConfig{Enabled: true}, + ApachedruidSegmentUsedPercent: MetricConfig{Enabled: true}, + ApachedruidSegmentWaitCompactBytes: MetricConfig{Enabled: true}, + ApachedruidSegmentWaitCompactCount: MetricConfig{Enabled: true}, + ApachedruidServerviewInitTime: MetricConfig{Enabled: true}, + ApachedruidServerviewSyncHealthy: MetricConfig{Enabled: true}, + ApachedruidServerviewSyncUnstableTime: MetricConfig{Enabled: true}, + ApachedruidSQLQueryBytes: MetricConfig{Enabled: true}, + ApachedruidSQLQueryPlanningTimeMs: MetricConfig{Enabled: true}, + ApachedruidSQLQueryTime: MetricConfig{Enabled: true}, + ApachedruidSubqueryByteLimitCount: MetricConfig{Enabled: true}, + ApachedruidSubqueryFallbackCount: MetricConfig{Enabled: true}, + ApachedruidSubqueryFallbackInsufficientTypeCount: MetricConfig{Enabled: true}, + ApachedruidSubqueryFallbackUnknownReasonCount: MetricConfig{Enabled: true}, + ApachedruidSubqueryRowLimitCount: MetricConfig{Enabled: true}, + ApachedruidSysCPU: MetricConfig{Enabled: true}, + ApachedruidSysDiskQueue: MetricConfig{Enabled: true}, + ApachedruidSysDiskReadCount: MetricConfig{Enabled: true}, + ApachedruidSysDiskReadSize: MetricConfig{Enabled: true}, + ApachedruidSysDiskTransferTime: MetricConfig{Enabled: true}, + ApachedruidSysDiskWriteCount: MetricConfig{Enabled: true}, + ApachedruidSysDiskWriteSize: MetricConfig{Enabled: true}, + ApachedruidSysFsFilesCount: MetricConfig{Enabled: true}, + ApachedruidSysFsFilesFree: MetricConfig{Enabled: true}, + ApachedruidSysFsMax: MetricConfig{Enabled: true}, + ApachedruidSysFsUsed: MetricConfig{Enabled: true}, + ApachedruidSysLa1: MetricConfig{Enabled: true}, + ApachedruidSysLa15: MetricConfig{Enabled: true}, + ApachedruidSysLa5: MetricConfig{Enabled: true}, + ApachedruidSysMemFree: MetricConfig{Enabled: true}, + ApachedruidSysMemMax: MetricConfig{Enabled: true}, + ApachedruidSysMemUsed: MetricConfig{Enabled: true}, + ApachedruidSysNetReadDropped: MetricConfig{Enabled: true}, + ApachedruidSysNetReadErrors: MetricConfig{Enabled: true}, + 
ApachedruidSysNetReadPackets: MetricConfig{Enabled: true}, + ApachedruidSysNetReadSize: MetricConfig{Enabled: true}, + ApachedruidSysNetWriteCollisions: MetricConfig{Enabled: true}, + ApachedruidSysNetWriteErrors: MetricConfig{Enabled: true}, + ApachedruidSysNetWritePackets: MetricConfig{Enabled: true}, + ApachedruidSysNetWriteSize: MetricConfig{Enabled: true}, + ApachedruidSysStorageUsed: MetricConfig{Enabled: true}, + ApachedruidSysSwapFree: MetricConfig{Enabled: true}, + ApachedruidSysSwapMax: MetricConfig{Enabled: true}, + ApachedruidSysSwapPageIn: MetricConfig{Enabled: true}, + ApachedruidSysSwapPageOut: MetricConfig{Enabled: true}, + ApachedruidSysTcpv4ActiveOpens: MetricConfig{Enabled: true}, + ApachedruidSysTcpv4AttemptFails: MetricConfig{Enabled: true}, + ApachedruidSysTcpv4EstabResets: MetricConfig{Enabled: true}, + ApachedruidSysTcpv4InErrs: MetricConfig{Enabled: true}, + ApachedruidSysTcpv4InSegs: MetricConfig{Enabled: true}, + ApachedruidSysTcpv4OutRsts: MetricConfig{Enabled: true}, + ApachedruidSysTcpv4OutSegs: MetricConfig{Enabled: true}, + ApachedruidSysTcpv4PassiveOpens: MetricConfig{Enabled: true}, + ApachedruidSysTcpv4RetransSegs: MetricConfig{Enabled: true}, + ApachedruidSysUptime: MetricConfig{Enabled: true}, + ApachedruidTaskActionBatchAttempts: MetricConfig{Enabled: true}, + ApachedruidTaskActionBatchQueueTime: MetricConfig{Enabled: true}, + ApachedruidTaskActionBatchRunTime: MetricConfig{Enabled: true}, + ApachedruidTaskActionBatchSize: MetricConfig{Enabled: true}, + ApachedruidTaskActionFailedCount: MetricConfig{Enabled: true}, + ApachedruidTaskActionLogTime: MetricConfig{Enabled: true}, + ApachedruidTaskActionRunTime: MetricConfig{Enabled: true}, + ApachedruidTaskActionSuccessCount: MetricConfig{Enabled: true}, + ApachedruidTaskFailedCount: MetricConfig{Enabled: true}, + ApachedruidTaskPendingCount: MetricConfig{Enabled: true}, + ApachedruidTaskPendingTime: MetricConfig{Enabled: true}, + ApachedruidTaskRunTime: MetricConfig{Enabled: true}, + ApachedruidTaskRunningCount: MetricConfig{Enabled: true}, + ApachedruidTaskSegmentAvailabilityWaitTime: MetricConfig{Enabled: true}, + ApachedruidTaskSuccessCount: MetricConfig{Enabled: true}, + ApachedruidTaskWaitingCount: MetricConfig{Enabled: true}, + ApachedruidTaskSlotBlacklistedCount: MetricConfig{Enabled: true}, + ApachedruidTaskSlotIdleCount: MetricConfig{Enabled: true}, + ApachedruidTaskSlotLazyCount: MetricConfig{Enabled: true}, + ApachedruidTaskSlotTotalCount: MetricConfig{Enabled: true}, + ApachedruidTaskSlotUsedCount: MetricConfig{Enabled: true}, + ApachedruidTierHistoricalCount: MetricConfig{Enabled: true}, + ApachedruidTierReplicationFactor: MetricConfig{Enabled: true}, + ApachedruidTierRequiredCapacity: MetricConfig{Enabled: true}, + ApachedruidTierTotalCapacity: MetricConfig{Enabled: true}, + ApachedruidWorkerTaskFailedCount: MetricConfig{Enabled: true}, + ApachedruidWorkerTaskSuccessCount: MetricConfig{Enabled: true}, + ApachedruidWorkerTaskSlotIdleCount: MetricConfig{Enabled: true}, + ApachedruidWorkerTaskSlotTotalCount: MetricConfig{Enabled: true}, + ApachedruidWorkerTaskSlotUsedCount: MetricConfig{Enabled: true}, + ApachedruidZkConnected: MetricConfig{Enabled: true}, + ApachedruidZkReconnectTime: MetricConfig{Enabled: true}, + }, + ResourceAttributes: ResourceAttributesConfig{ + ApachedruidClusterName: ResourceAttributeConfig{Enabled: true}, + ApachedruidNodeHost: ResourceAttributeConfig{Enabled: true}, + ApachedruidNodeService: ResourceAttributeConfig{Enabled: true}, + }, + }, + }, + { + name: "none_set", 
+ want: MetricsBuilderConfig{ + Metrics: MetricsConfig{ + ApachedruidCompactSegmentAnalyzerFetchAndProcessMillis: MetricConfig{Enabled: false}, + ApachedruidCompactTaskCount: MetricConfig{Enabled: false}, + ApachedruidCompactTaskAvailableSlotCount: MetricConfig{Enabled: false}, + ApachedruidCompactTaskMaxSlotCount: MetricConfig{Enabled: false}, + ApachedruidCoordinatorGlobalTime: MetricConfig{Enabled: false}, + ApachedruidCoordinatorTime: MetricConfig{Enabled: false}, + ApachedruidIngestBytesReceived: MetricConfig{Enabled: false}, + ApachedruidIngestCount: MetricConfig{Enabled: false}, + ApachedruidIngestEventsBuffered: MetricConfig{Enabled: false}, + ApachedruidIngestEventsDuplicate: MetricConfig{Enabled: false}, + ApachedruidIngestEventsMessageGap: MetricConfig{Enabled: false}, + ApachedruidIngestEventsProcessed: MetricConfig{Enabled: false}, + ApachedruidIngestEventsProcessedWithError: MetricConfig{Enabled: false}, + ApachedruidIngestEventsThrownAway: MetricConfig{Enabled: false}, + ApachedruidIngestEventsUnparseable: MetricConfig{Enabled: false}, + ApachedruidIngestHandoffCount: MetricConfig{Enabled: false}, + ApachedruidIngestHandoffFailed: MetricConfig{Enabled: false}, + ApachedruidIngestHandoffTime: MetricConfig{Enabled: false}, + ApachedruidIngestInputBytes: MetricConfig{Enabled: false}, + ApachedruidIngestKafkaAvgLag: MetricConfig{Enabled: false}, + ApachedruidIngestKafkaLag: MetricConfig{Enabled: false}, + ApachedruidIngestKafkaMaxLag: MetricConfig{Enabled: false}, + ApachedruidIngestKafkaPartitionLag: MetricConfig{Enabled: false}, + ApachedruidIngestKinesisAvgLagTime: MetricConfig{Enabled: false}, + ApachedruidIngestKinesisLagTime: MetricConfig{Enabled: false}, + ApachedruidIngestKinesisMaxLagTime: MetricConfig{Enabled: false}, + ApachedruidIngestKinesisPartitionLagTime: MetricConfig{Enabled: false}, + ApachedruidIngestMergeCPU: MetricConfig{Enabled: false}, + ApachedruidIngestMergeTime: MetricConfig{Enabled: false}, + ApachedruidIngestNoticesQueueSize: MetricConfig{Enabled: false}, + ApachedruidIngestNoticesTime: MetricConfig{Enabled: false}, + ApachedruidIngestPauseTime: MetricConfig{Enabled: false}, + ApachedruidIngestPersistsBackPressure: MetricConfig{Enabled: false}, + ApachedruidIngestPersistsCount: MetricConfig{Enabled: false}, + ApachedruidIngestPersistsCPU: MetricConfig{Enabled: false}, + ApachedruidIngestPersistsFailed: MetricConfig{Enabled: false}, + ApachedruidIngestPersistsTime: MetricConfig{Enabled: false}, + ApachedruidIngestRowsOutput: MetricConfig{Enabled: false}, + ApachedruidIngestSegmentsCount: MetricConfig{Enabled: false}, + ApachedruidIngestShuffleBytes: MetricConfig{Enabled: false}, + ApachedruidIngestShuffleRequests: MetricConfig{Enabled: false}, + ApachedruidIngestSinkCount: MetricConfig{Enabled: false}, + ApachedruidIngestTombstonesCount: MetricConfig{Enabled: false}, + ApachedruidIntervalCompactedCount: MetricConfig{Enabled: false}, + ApachedruidIntervalSkipCompactCount: MetricConfig{Enabled: false}, + ApachedruidIntervalWaitCompactCount: MetricConfig{Enabled: false}, + ApachedruidJettyNumOpenConnections: MetricConfig{Enabled: false}, + ApachedruidJettyThreadPoolBusy: MetricConfig{Enabled: false}, + ApachedruidJettyThreadPoolIdle: MetricConfig{Enabled: false}, + ApachedruidJettyThreadPoolIsLowOnThreads: MetricConfig{Enabled: false}, + ApachedruidJettyThreadPoolMax: MetricConfig{Enabled: false}, + ApachedruidJettyThreadPoolMin: MetricConfig{Enabled: false}, + ApachedruidJettyThreadPoolQueueSize: MetricConfig{Enabled: false}, + 
ApachedruidJettyThreadPoolTotal: MetricConfig{Enabled: false}, + ApachedruidJvmBufferpoolCapacity: MetricConfig{Enabled: false}, + ApachedruidJvmBufferpoolCount: MetricConfig{Enabled: false}, + ApachedruidJvmBufferpoolUsed: MetricConfig{Enabled: false}, + ApachedruidJvmGcCount: MetricConfig{Enabled: false}, + ApachedruidJvmGcCPU: MetricConfig{Enabled: false}, + ApachedruidJvmMemCommitted: MetricConfig{Enabled: false}, + ApachedruidJvmMemInit: MetricConfig{Enabled: false}, + ApachedruidJvmMemMax: MetricConfig{Enabled: false}, + ApachedruidJvmMemUsed: MetricConfig{Enabled: false}, + ApachedruidJvmPoolCommitted: MetricConfig{Enabled: false}, + ApachedruidJvmPoolInit: MetricConfig{Enabled: false}, + ApachedruidJvmPoolMax: MetricConfig{Enabled: false}, + ApachedruidJvmPoolUsed: MetricConfig{Enabled: false}, + ApachedruidKillPendingSegmentsCount: MetricConfig{Enabled: false}, + ApachedruidKillTaskCount: MetricConfig{Enabled: false}, + ApachedruidKillTaskAvailableSlotCount: MetricConfig{Enabled: false}, + ApachedruidKillTaskMaxSlotCount: MetricConfig{Enabled: false}, + ApachedruidMergeBufferPendingRequests: MetricConfig{Enabled: false}, + ApachedruidMetadataKillAuditCount: MetricConfig{Enabled: false}, + ApachedruidMetadataKillCompactionCount: MetricConfig{Enabled: false}, + ApachedruidMetadataKillDatasourceCount: MetricConfig{Enabled: false}, + ApachedruidMetadataKillRuleCount: MetricConfig{Enabled: false}, + ApachedruidMetadataKillSupervisorCount: MetricConfig{Enabled: false}, + ApachedruidMetadatacacheInitTime: MetricConfig{Enabled: false}, + ApachedruidMetadatacacheRefreshCount: MetricConfig{Enabled: false}, + ApachedruidMetadatacacheRefreshTime: MetricConfig{Enabled: false}, + ApachedruidQueryByteLimitExceededCount: MetricConfig{Enabled: false}, + ApachedruidQueryBytes: MetricConfig{Enabled: false}, + ApachedruidQueryCacheDeltaAverageBytes: MetricConfig{Enabled: false}, + ApachedruidQueryCacheDeltaErrors: MetricConfig{Enabled: false}, + ApachedruidQueryCacheDeltaEvictions: MetricConfig{Enabled: false}, + ApachedruidQueryCacheDeltaHitRate: MetricConfig{Enabled: false}, + ApachedruidQueryCacheDeltaHits: MetricConfig{Enabled: false}, + ApachedruidQueryCacheDeltaMisses: MetricConfig{Enabled: false}, + ApachedruidQueryCacheDeltaNumEntries: MetricConfig{Enabled: false}, + ApachedruidQueryCacheDeltaPutError: MetricConfig{Enabled: false}, + ApachedruidQueryCacheDeltaPutOk: MetricConfig{Enabled: false}, + ApachedruidQueryCacheDeltaPutOversized: MetricConfig{Enabled: false}, + ApachedruidQueryCacheDeltaSizeBytes: MetricConfig{Enabled: false}, + ApachedruidQueryCacheDeltaTimeouts: MetricConfig{Enabled: false}, + ApachedruidQueryCacheMemcachedDelta: MetricConfig{Enabled: false}, + ApachedruidQueryCacheMemcachedTotal: MetricConfig{Enabled: false}, + ApachedruidQueryCacheTotalAverageBytes: MetricConfig{Enabled: false}, + ApachedruidQueryCacheTotalErrors: MetricConfig{Enabled: false}, + ApachedruidQueryCacheTotalEvictions: MetricConfig{Enabled: false}, + ApachedruidQueryCacheTotalHitRate: MetricConfig{Enabled: false}, + ApachedruidQueryCacheTotalHits: MetricConfig{Enabled: false}, + ApachedruidQueryCacheTotalMisses: MetricConfig{Enabled: false}, + ApachedruidQueryCacheTotalNumEntries: MetricConfig{Enabled: false}, + ApachedruidQueryCacheTotalPutError: MetricConfig{Enabled: false}, + ApachedruidQueryCacheTotalPutOk: MetricConfig{Enabled: false}, + ApachedruidQueryCacheTotalPutOversized: MetricConfig{Enabled: false}, + ApachedruidQueryCacheTotalSizeBytes: MetricConfig{Enabled: false}, + 
ApachedruidQueryCacheTotalTimeouts: MetricConfig{Enabled: false}, + ApachedruidQueryCount: MetricConfig{Enabled: false}, + ApachedruidQueryCPUTime: MetricConfig{Enabled: false}, + ApachedruidQueryFailedCount: MetricConfig{Enabled: false}, + ApachedruidQueryInterruptedCount: MetricConfig{Enabled: false}, + ApachedruidQueryNodeBackpressure: MetricConfig{Enabled: false}, + ApachedruidQueryNodeBytes: MetricConfig{Enabled: false}, + ApachedruidQueryNodeTime: MetricConfig{Enabled: false}, + ApachedruidQueryNodeTtfb: MetricConfig{Enabled: false}, + ApachedruidQueryPriority: MetricConfig{Enabled: false}, + ApachedruidQueryRowLimitExceededCount: MetricConfig{Enabled: false}, + ApachedruidQuerySegmentTime: MetricConfig{Enabled: false}, + ApachedruidQuerySegmentAndCacheTime: MetricConfig{Enabled: false}, + ApachedruidQuerySegmentsCount: MetricConfig{Enabled: false}, + ApachedruidQuerySuccessCount: MetricConfig{Enabled: false}, + ApachedruidQueryTime: MetricConfig{Enabled: false}, + ApachedruidQueryTimeoutCount: MetricConfig{Enabled: false}, + ApachedruidQueryWaitTime: MetricConfig{Enabled: false}, + ApachedruidSegmentAddedBytes: MetricConfig{Enabled: false}, + ApachedruidSegmentAssignSkippedCount: MetricConfig{Enabled: false}, + ApachedruidSegmentAssignedCount: MetricConfig{Enabled: false}, + ApachedruidSegmentCompactedBytes: MetricConfig{Enabled: false}, + ApachedruidSegmentCompactedCount: MetricConfig{Enabled: false}, + ApachedruidSegmentCount: MetricConfig{Enabled: false}, + ApachedruidSegmentDeletedCount: MetricConfig{Enabled: false}, + ApachedruidSegmentDropQueueCount: MetricConfig{Enabled: false}, + ApachedruidSegmentDropSkippedCount: MetricConfig{Enabled: false}, + ApachedruidSegmentDroppedCount: MetricConfig{Enabled: false}, + ApachedruidSegmentLoadQueueAssigned: MetricConfig{Enabled: false}, + ApachedruidSegmentLoadQueueCancelled: MetricConfig{Enabled: false}, + ApachedruidSegmentLoadQueueCount: MetricConfig{Enabled: false}, + ApachedruidSegmentLoadQueueFailed: MetricConfig{Enabled: false}, + ApachedruidSegmentLoadQueueSize: MetricConfig{Enabled: false}, + ApachedruidSegmentLoadQueueSuccess: MetricConfig{Enabled: false}, + ApachedruidSegmentMax: MetricConfig{Enabled: false}, + ApachedruidSegmentMoveSkippedCount: MetricConfig{Enabled: false}, + ApachedruidSegmentMovedBytes: MetricConfig{Enabled: false}, + ApachedruidSegmentMovedCount: MetricConfig{Enabled: false}, + ApachedruidSegmentNukedBytes: MetricConfig{Enabled: false}, + ApachedruidSegmentOverShadowedCount: MetricConfig{Enabled: false}, + ApachedruidSegmentPendingDelete: MetricConfig{Enabled: false}, + ApachedruidSegmentRowCountAvg: MetricConfig{Enabled: false}, + ApachedruidSegmentRowCountRangeCount: MetricConfig{Enabled: false}, + ApachedruidSegmentScanActive: MetricConfig{Enabled: false}, + ApachedruidSegmentScanPending: MetricConfig{Enabled: false}, + ApachedruidSegmentSize: MetricConfig{Enabled: false}, + ApachedruidSegmentSkipCompactBytes: MetricConfig{Enabled: false}, + ApachedruidSegmentSkipCompactCount: MetricConfig{Enabled: false}, + ApachedruidSegmentUnavailableCount: MetricConfig{Enabled: false}, + ApachedruidSegmentUnderReplicatedCount: MetricConfig{Enabled: false}, + ApachedruidSegmentUnneededCount: MetricConfig{Enabled: false}, + ApachedruidSegmentUsed: MetricConfig{Enabled: false}, + ApachedruidSegmentUsedPercent: MetricConfig{Enabled: false}, + ApachedruidSegmentWaitCompactBytes: MetricConfig{Enabled: false}, + ApachedruidSegmentWaitCompactCount: MetricConfig{Enabled: false}, + ApachedruidServerviewInitTime: 
MetricConfig{Enabled: false}, + ApachedruidServerviewSyncHealthy: MetricConfig{Enabled: false}, + ApachedruidServerviewSyncUnstableTime: MetricConfig{Enabled: false}, + ApachedruidSQLQueryBytes: MetricConfig{Enabled: false}, + ApachedruidSQLQueryPlanningTimeMs: MetricConfig{Enabled: false}, + ApachedruidSQLQueryTime: MetricConfig{Enabled: false}, + ApachedruidSubqueryByteLimitCount: MetricConfig{Enabled: false}, + ApachedruidSubqueryFallbackCount: MetricConfig{Enabled: false}, + ApachedruidSubqueryFallbackInsufficientTypeCount: MetricConfig{Enabled: false}, + ApachedruidSubqueryFallbackUnknownReasonCount: MetricConfig{Enabled: false}, + ApachedruidSubqueryRowLimitCount: MetricConfig{Enabled: false}, + ApachedruidSysCPU: MetricConfig{Enabled: false}, + ApachedruidSysDiskQueue: MetricConfig{Enabled: false}, + ApachedruidSysDiskReadCount: MetricConfig{Enabled: false}, + ApachedruidSysDiskReadSize: MetricConfig{Enabled: false}, + ApachedruidSysDiskTransferTime: MetricConfig{Enabled: false}, + ApachedruidSysDiskWriteCount: MetricConfig{Enabled: false}, + ApachedruidSysDiskWriteSize: MetricConfig{Enabled: false}, + ApachedruidSysFsFilesCount: MetricConfig{Enabled: false}, + ApachedruidSysFsFilesFree: MetricConfig{Enabled: false}, + ApachedruidSysFsMax: MetricConfig{Enabled: false}, + ApachedruidSysFsUsed: MetricConfig{Enabled: false}, + ApachedruidSysLa1: MetricConfig{Enabled: false}, + ApachedruidSysLa15: MetricConfig{Enabled: false}, + ApachedruidSysLa5: MetricConfig{Enabled: false}, + ApachedruidSysMemFree: MetricConfig{Enabled: false}, + ApachedruidSysMemMax: MetricConfig{Enabled: false}, + ApachedruidSysMemUsed: MetricConfig{Enabled: false}, + ApachedruidSysNetReadDropped: MetricConfig{Enabled: false}, + ApachedruidSysNetReadErrors: MetricConfig{Enabled: false}, + ApachedruidSysNetReadPackets: MetricConfig{Enabled: false}, + ApachedruidSysNetReadSize: MetricConfig{Enabled: false}, + ApachedruidSysNetWriteCollisions: MetricConfig{Enabled: false}, + ApachedruidSysNetWriteErrors: MetricConfig{Enabled: false}, + ApachedruidSysNetWritePackets: MetricConfig{Enabled: false}, + ApachedruidSysNetWriteSize: MetricConfig{Enabled: false}, + ApachedruidSysStorageUsed: MetricConfig{Enabled: false}, + ApachedruidSysSwapFree: MetricConfig{Enabled: false}, + ApachedruidSysSwapMax: MetricConfig{Enabled: false}, + ApachedruidSysSwapPageIn: MetricConfig{Enabled: false}, + ApachedruidSysSwapPageOut: MetricConfig{Enabled: false}, + ApachedruidSysTcpv4ActiveOpens: MetricConfig{Enabled: false}, + ApachedruidSysTcpv4AttemptFails: MetricConfig{Enabled: false}, + ApachedruidSysTcpv4EstabResets: MetricConfig{Enabled: false}, + ApachedruidSysTcpv4InErrs: MetricConfig{Enabled: false}, + ApachedruidSysTcpv4InSegs: MetricConfig{Enabled: false}, + ApachedruidSysTcpv4OutRsts: MetricConfig{Enabled: false}, + ApachedruidSysTcpv4OutSegs: MetricConfig{Enabled: false}, + ApachedruidSysTcpv4PassiveOpens: MetricConfig{Enabled: false}, + ApachedruidSysTcpv4RetransSegs: MetricConfig{Enabled: false}, + ApachedruidSysUptime: MetricConfig{Enabled: false}, + ApachedruidTaskActionBatchAttempts: MetricConfig{Enabled: false}, + ApachedruidTaskActionBatchQueueTime: MetricConfig{Enabled: false}, + ApachedruidTaskActionBatchRunTime: MetricConfig{Enabled: false}, + ApachedruidTaskActionBatchSize: MetricConfig{Enabled: false}, + ApachedruidTaskActionFailedCount: MetricConfig{Enabled: false}, + ApachedruidTaskActionLogTime: MetricConfig{Enabled: false}, + ApachedruidTaskActionRunTime: MetricConfig{Enabled: false}, + 
ApachedruidTaskActionSuccessCount: MetricConfig{Enabled: false}, + ApachedruidTaskFailedCount: MetricConfig{Enabled: false}, + ApachedruidTaskPendingCount: MetricConfig{Enabled: false}, + ApachedruidTaskPendingTime: MetricConfig{Enabled: false}, + ApachedruidTaskRunTime: MetricConfig{Enabled: false}, + ApachedruidTaskRunningCount: MetricConfig{Enabled: false}, + ApachedruidTaskSegmentAvailabilityWaitTime: MetricConfig{Enabled: false}, + ApachedruidTaskSuccessCount: MetricConfig{Enabled: false}, + ApachedruidTaskWaitingCount: MetricConfig{Enabled: false}, + ApachedruidTaskSlotBlacklistedCount: MetricConfig{Enabled: false}, + ApachedruidTaskSlotIdleCount: MetricConfig{Enabled: false}, + ApachedruidTaskSlotLazyCount: MetricConfig{Enabled: false}, + ApachedruidTaskSlotTotalCount: MetricConfig{Enabled: false}, + ApachedruidTaskSlotUsedCount: MetricConfig{Enabled: false}, + ApachedruidTierHistoricalCount: MetricConfig{Enabled: false}, + ApachedruidTierReplicationFactor: MetricConfig{Enabled: false}, + ApachedruidTierRequiredCapacity: MetricConfig{Enabled: false}, + ApachedruidTierTotalCapacity: MetricConfig{Enabled: false}, + ApachedruidWorkerTaskFailedCount: MetricConfig{Enabled: false}, + ApachedruidWorkerTaskSuccessCount: MetricConfig{Enabled: false}, + ApachedruidWorkerTaskSlotIdleCount: MetricConfig{Enabled: false}, + ApachedruidWorkerTaskSlotTotalCount: MetricConfig{Enabled: false}, + ApachedruidWorkerTaskSlotUsedCount: MetricConfig{Enabled: false}, + ApachedruidZkConnected: MetricConfig{Enabled: false}, + ApachedruidZkReconnectTime: MetricConfig{Enabled: false}, + }, + ResourceAttributes: ResourceAttributesConfig{ + ApachedruidClusterName: ResourceAttributeConfig{Enabled: false}, + ApachedruidNodeHost: ResourceAttributeConfig{Enabled: false}, + ApachedruidNodeService: ResourceAttributeConfig{Enabled: false}, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := loadMetricsBuilderConfig(t, tt.name) + if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(MetricConfig{}, ResourceAttributeConfig{})); diff != "" { + t.Errorf("Config mismatch (-expected +actual):\n%s", diff) + } + }) + } +} + +func loadMetricsBuilderConfig(t *testing.T, name string) MetricsBuilderConfig { + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config.yaml")) + require.NoError(t, err) + sub, err := cm.Sub(name) + require.NoError(t, err) + cfg := DefaultMetricsBuilderConfig() + require.NoError(t, component.UnmarshalConfig(sub, &cfg)) + return cfg +} + +func TestResourceAttributesConfig(t *testing.T) { + tests := []struct { + name string + want ResourceAttributesConfig + }{ + { + name: "default", + want: DefaultResourceAttributesConfig(), + }, + { + name: "all_set", + want: ResourceAttributesConfig{ + ApachedruidClusterName: ResourceAttributeConfig{Enabled: true}, + ApachedruidNodeHost: ResourceAttributeConfig{Enabled: true}, + ApachedruidNodeService: ResourceAttributeConfig{Enabled: true}, + }, + }, + { + name: "none_set", + want: ResourceAttributesConfig{ + ApachedruidClusterName: ResourceAttributeConfig{Enabled: false}, + ApachedruidNodeHost: ResourceAttributeConfig{Enabled: false}, + ApachedruidNodeService: ResourceAttributeConfig{Enabled: false}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := loadResourceAttributesConfig(t, tt.name) + if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(ResourceAttributeConfig{})); diff != "" { + t.Errorf("Config mismatch (-expected +actual):\n%s", diff) + } + }) + } +} + +func 
loadResourceAttributesConfig(t *testing.T, name string) ResourceAttributesConfig { + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config.yaml")) + require.NoError(t, err) + sub, err := cm.Sub(name) + require.NoError(t, err) + sub, err = sub.Sub("resource_attributes") + require.NoError(t, err) + cfg := DefaultResourceAttributesConfig() + require.NoError(t, component.UnmarshalConfig(sub, &cfg)) + return cfg +} diff --git a/receiver/apachedruidreceiver/internal/metadata/generated_metrics.go b/receiver/apachedruidreceiver/internal/metadata/generated_metrics.go new file mode 100644 index 0000000000000..f28e9095cf8d6 --- /dev/null +++ b/receiver/apachedruidreceiver/internal/metadata/generated_metrics.go @@ -0,0 +1,14859 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "time" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/receiver" +) + +type metricApachedruidCompactSegmentAnalyzerFetchAndProcessMillis struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.compact.segment_analyzer.fetch_and_process_millis metric with initial data. +func (m *metricApachedruidCompactSegmentAnalyzerFetchAndProcessMillis) init() { + m.data.SetName("apachedruid.compact.segment_analyzer.fetch_and_process_millis") + m.data.SetDescription("Time taken to fetch and process segments to infer the schema for the compaction task to run.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidCompactSegmentAnalyzerFetchAndProcessMillis) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, compactTaskTypeAttributeValue string, compactDataSourceAttributeValue string, compactGroupIDAttributeValue string, compactTagsAttributeValue string, compactTaskIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", compactTaskTypeAttributeValue) + dp.Attributes().PutStr("data_source", compactDataSourceAttributeValue) + dp.Attributes().PutStr("group_id", compactGroupIDAttributeValue) + dp.Attributes().PutStr("tags", compactTagsAttributeValue) + dp.Attributes().PutStr("task_id", compactTaskIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidCompactSegmentAnalyzerFetchAndProcessMillis) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricApachedruidCompactSegmentAnalyzerFetchAndProcessMillis) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidCompactSegmentAnalyzerFetchAndProcessMillis(cfg MetricConfig) metricApachedruidCompactSegmentAnalyzerFetchAndProcessMillis { + m := metricApachedruidCompactSegmentAnalyzerFetchAndProcessMillis{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidCompactTaskCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.compact.task.count metric with initial data. +func (m *metricApachedruidCompactTaskCount) init() { + m.data.SetName("apachedruid.compact.task.count") + m.data.SetDescription("Number of tasks issued in the auto compaction run.") + m.data.SetUnit("{tasks}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) +} + +func (m *metricApachedruidCompactTaskCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidCompactTaskCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidCompactTaskCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidCompactTaskCount(cfg MetricConfig) metricApachedruidCompactTaskCount { + m := metricApachedruidCompactTaskCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidCompactTaskAvailableSlotCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.compact_task.available_slot.count metric with initial data. +func (m *metricApachedruidCompactTaskAvailableSlotCount) init() { + m.data.SetName("apachedruid.compact_task.available_slot.count") + m.data.SetDescription("Number of available task slots that can be used for auto compaction tasks in the auto compaction run. This is the max number of task slots minus any currently running compaction tasks.") + m.data.SetUnit("{slots}") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidCompactTaskAvailableSlotCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricApachedruidCompactTaskAvailableSlotCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidCompactTaskAvailableSlotCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidCompactTaskAvailableSlotCount(cfg MetricConfig) metricApachedruidCompactTaskAvailableSlotCount { + m := metricApachedruidCompactTaskAvailableSlotCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidCompactTaskMaxSlotCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.compact_task.max_slot.count metric with initial data. +func (m *metricApachedruidCompactTaskMaxSlotCount) init() { + m.data.SetName("apachedruid.compact_task.max_slot.count") + m.data.SetDescription("Maximum number of task slots available for auto compaction tasks in the auto compaction run.") + m.data.SetUnit("{slots}") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidCompactTaskMaxSlotCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidCompactTaskMaxSlotCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidCompactTaskMaxSlotCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidCompactTaskMaxSlotCount(cfg MetricConfig) metricApachedruidCompactTaskMaxSlotCount { + m := metricApachedruidCompactTaskMaxSlotCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidCoordinatorGlobalTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.coordinator.global.time metric with initial data. +func (m *metricApachedruidCoordinatorGlobalTime) init() { + m.data.SetName("apachedruid.coordinator.global.time") + m.data.SetDescription("Approximate runtime of a full coordination cycle in milliseconds. The `dutyGroup` dimension indicates what type of coordination this run was. 
For example, Historical Management or Indexing.") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidCoordinatorGlobalTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, coordinatorDutyGroupAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("duty_group", coordinatorDutyGroupAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidCoordinatorGlobalTime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidCoordinatorGlobalTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidCoordinatorGlobalTime(cfg MetricConfig) metricApachedruidCoordinatorGlobalTime { + m := metricApachedruidCoordinatorGlobalTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidCoordinatorTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.coordinator.time metric with initial data. +func (m *metricApachedruidCoordinatorTime) init() { + m.data.SetName("apachedruid.coordinator.time") + m.data.SetDescription("Approximate Coordinator duty runtime in milliseconds.") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidCoordinatorTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, coordinatorDutyAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("duty", coordinatorDutyAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidCoordinatorTime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidCoordinatorTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidCoordinatorTime(cfg MetricConfig) metricApachedruidCoordinatorTime { + m := metricApachedruidCoordinatorTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestBytesReceived struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. 
+} + +// init fills apachedruid.ingest.bytes.received metric with initial data. +func (m *metricApachedruidIngestBytesReceived) init() { + m.data.SetName("apachedruid.ingest.bytes.received") + m.data.SetDescription("Number of bytes received by the `EventReceiverFirehose`.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestBytesReceived) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestTaskIDAttributeValue string, ingestDataSourceAttributeValue string, ingestServiceNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) + dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) + dp.Attributes().PutStr("service_name", ingestServiceNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIngestBytesReceived) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestBytesReceived) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestBytesReceived(cfg MetricConfig) metricApachedruidIngestBytesReceived { + m := metricApachedruidIngestBytesReceived{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.count metric with initial data. +func (m *metricApachedruidIngestCount) init() { + m.data.SetName("apachedruid.ingest.count") + m.data.SetDescription("Count of `1` every time an ingestion job runs (includes compaction jobs). 
Aggregate using dimensions.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string, ingestTaskIngestionModeAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) + dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) + dp.Attributes().PutStr("tags", ingestTagsAttributeValue) + dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) + dp.Attributes().PutStr("task_ingestion_mode", ingestTaskIngestionModeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIngestCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestCount(cfg MetricConfig) metricApachedruidIngestCount { + m := metricApachedruidIngestCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestEventsBuffered struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.events.buffered metric with initial data. +func (m *metricApachedruidIngestEventsBuffered) init() { + m.data.SetName("apachedruid.ingest.events.buffered") + m.data.SetDescription("Number of events queued in the `EventReceiverFirehose` buffer.") + m.data.SetUnit("{events}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestEventsBuffered) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestServiceNameAttributeValue string, ingestBufferCapacityAttributeValue string, ingestTaskIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) + dp.Attributes().PutStr("service_name", ingestServiceNameAttributeValue) + dp.Attributes().PutStr("buffer_capacity", ingestBufferCapacityAttributeValue) + dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricApachedruidIngestEventsBuffered) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestEventsBuffered) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestEventsBuffered(cfg MetricConfig) metricApachedruidIngestEventsBuffered { + m := metricApachedruidIngestEventsBuffered{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestEventsDuplicate struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.events.duplicate metric with initial data. +func (m *metricApachedruidIngestEventsDuplicate) init() { + m.data.SetName("apachedruid.ingest.events.duplicate") + m.data.SetDescription("Number of events rejected because the events are duplicated.") + m.data.SetUnit("{events}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestEventsDuplicate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) + dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) + dp.Attributes().PutStr("tags", ingestTagsAttributeValue) + dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIngestEventsDuplicate) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestEventsDuplicate) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestEventsDuplicate(cfg MetricConfig) metricApachedruidIngestEventsDuplicate { + m := metricApachedruidIngestEventsDuplicate{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestEventsMessageGap struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.events.message_gap metric with initial data. 
+func (m *metricApachedruidIngestEventsMessageGap) init() { + m.data.SetName("apachedruid.ingest.events.message_gap") + m.data.SetDescription("Time gap in milliseconds between the latest ingested event timestamp and the current system timestamp of metrics emission. If the value is increasing but lag is low, Druid may not be receiving new data. This metric is reset as new tasks spawn up.") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestEventsMessageGap) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) + dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) + dp.Attributes().PutStr("tags", ingestTagsAttributeValue) + dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIngestEventsMessageGap) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestEventsMessageGap) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestEventsMessageGap(cfg MetricConfig) metricApachedruidIngestEventsMessageGap { + m := metricApachedruidIngestEventsMessageGap{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestEventsProcessed struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.events.processed metric with initial data. 
+func (m *metricApachedruidIngestEventsProcessed) init() { + m.data.SetName("apachedruid.ingest.events.processed") + m.data.SetDescription("Number of events processed per emission period.") + m.data.SetUnit("{events}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestEventsProcessed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) + dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) + dp.Attributes().PutStr("tags", ingestTagsAttributeValue) + dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIngestEventsProcessed) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestEventsProcessed) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestEventsProcessed(cfg MetricConfig) metricApachedruidIngestEventsProcessed { + m := metricApachedruidIngestEventsProcessed{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestEventsProcessedWithError struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.events.processed_with_error metric with initial data. +func (m *metricApachedruidIngestEventsProcessedWithError) init() { + m.data.SetName("apachedruid.ingest.events.processed_with_error") + m.data.SetDescription("Number of events processed with some partial errors per emission period. 
Events processed with partial errors are counted towards both this metric and `ingest/events/processed`.") + m.data.SetUnit("{events}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestEventsProcessedWithError) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) + dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) + dp.Attributes().PutStr("tags", ingestTagsAttributeValue) + dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIngestEventsProcessedWithError) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestEventsProcessedWithError) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestEventsProcessedWithError(cfg MetricConfig) metricApachedruidIngestEventsProcessedWithError { + m := metricApachedruidIngestEventsProcessedWithError{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestEventsThrownAway struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.events.thrown_away metric with initial data. 
+func (m *metricApachedruidIngestEventsThrownAway) init() { + m.data.SetName("apachedruid.ingest.events.thrown_away") + m.data.SetDescription("Number of events rejected because they are null, or filtered by `transformSpec`, or outside one of `lateMessageRejectionPeriod`, `earlyMessageRejectionPeriod`, or `windowPeriod`.") + m.data.SetUnit("{events}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestEventsThrownAway) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) + dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) + dp.Attributes().PutStr("tags", ingestTagsAttributeValue) + dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIngestEventsThrownAway) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestEventsThrownAway) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestEventsThrownAway(cfg MetricConfig) metricApachedruidIngestEventsThrownAway { + m := metricApachedruidIngestEventsThrownAway{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestEventsUnparseable struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.events.unparseable metric with initial data. 
+func (m *metricApachedruidIngestEventsUnparseable) init() { + m.data.SetName("apachedruid.ingest.events.unparseable") + m.data.SetDescription("Number of events rejected because the events are unparseable.") + m.data.SetUnit("{events}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestEventsUnparseable) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) + dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) + dp.Attributes().PutStr("tags", ingestTagsAttributeValue) + dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIngestEventsUnparseable) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestEventsUnparseable) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestEventsUnparseable(cfg MetricConfig) metricApachedruidIngestEventsUnparseable { + m := metricApachedruidIngestEventsUnparseable{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestHandoffCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.handoff.count metric with initial data. 
+func (m *metricApachedruidIngestHandoffCount) init() { + m.data.SetName("apachedruid.ingest.handoff.count") + m.data.SetDescription("Number of handoffs that happened.") + m.data.SetUnit("{handoffs}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestHandoffCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) + dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) + dp.Attributes().PutStr("tags", ingestTagsAttributeValue) + dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIngestHandoffCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestHandoffCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestHandoffCount(cfg MetricConfig) metricApachedruidIngestHandoffCount { + m := metricApachedruidIngestHandoffCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestHandoffFailed struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.handoff.failed metric with initial data. 
+func (m *metricApachedruidIngestHandoffFailed) init() { + m.data.SetName("apachedruid.ingest.handoff.failed") + m.data.SetDescription("Number of handoffs that failed.") + m.data.SetUnit("{handoffs}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestHandoffFailed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) + dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) + dp.Attributes().PutStr("tags", ingestTagsAttributeValue) + dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIngestHandoffFailed) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestHandoffFailed) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestHandoffFailed(cfg MetricConfig) metricApachedruidIngestHandoffFailed { + m := metricApachedruidIngestHandoffFailed{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestHandoffTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.handoff.time metric with initial data. 
+func (m *metricApachedruidIngestHandoffTime) init() { + m.data.SetName("apachedruid.ingest.handoff.time") + m.data.SetDescription("Total number of milliseconds taken to handoff a set of segments.") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestHandoffTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) + dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) + dp.Attributes().PutStr("tags", ingestTagsAttributeValue) + dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIngestHandoffTime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestHandoffTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestHandoffTime(cfg MetricConfig) metricApachedruidIngestHandoffTime { + m := metricApachedruidIngestHandoffTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestInputBytes struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.input.bytes metric with initial data. +func (m *metricApachedruidIngestInputBytes) init() { + m.data.SetName("apachedruid.ingest.input.bytes") + m.data.SetDescription("Number of bytes read from input sources, after decompression but prior to parsing. This covers all data read, including data that does not end up being fully processed and ingested. 
For example, this includes data that ends up being rejected for being unparseable or filtered out.") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestInputBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) + dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) + dp.Attributes().PutStr("tags", ingestTagsAttributeValue) + dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIngestInputBytes) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestInputBytes) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestInputBytes(cfg MetricConfig) metricApachedruidIngestInputBytes { + m := metricApachedruidIngestInputBytes{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestKafkaAvgLag struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.kafka.avg_lag metric with initial data. +func (m *metricApachedruidIngestKafkaAvgLag) init() { + m.data.SetName("apachedruid.ingest.kafka.avg_lag") + m.data.SetDescription("Average lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers across all partitions. Minimum emission period for this metric is a minute.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestKafkaAvgLag) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestStreamAttributeValue string, ingestDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("tags", ingestTagsAttributeValue) + dp.Attributes().PutStr("stream", ingestStreamAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricApachedruidIngestKafkaAvgLag) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestKafkaAvgLag) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestKafkaAvgLag(cfg MetricConfig) metricApachedruidIngestKafkaAvgLag { + m := metricApachedruidIngestKafkaAvgLag{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestKafkaLag struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.kafka.lag metric with initial data. +func (m *metricApachedruidIngestKafkaLag) init() { + m.data.SetName("apachedruid.ingest.kafka.lag") + m.data.SetDescription("Total lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers across all partitions. Minimum emission period for this metric is a minute.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestKafkaLag) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestStreamAttributeValue string, ingestDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("tags", ingestTagsAttributeValue) + dp.Attributes().PutStr("stream", ingestStreamAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIngestKafkaLag) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestKafkaLag) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestKafkaLag(cfg MetricConfig) metricApachedruidIngestKafkaLag { + m := metricApachedruidIngestKafkaLag{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestKafkaMaxLag struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.kafka.max_lag metric with initial data. +func (m *metricApachedruidIngestKafkaMaxLag) init() { + m.data.SetName("apachedruid.ingest.kafka.max_lag") + m.data.SetDescription("Max lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers across all partitions. 
Minimum emission period for this metric is a minute.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestKafkaMaxLag) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestStreamAttributeValue string, ingestDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("tags", ingestTagsAttributeValue) + dp.Attributes().PutStr("stream", ingestStreamAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIngestKafkaMaxLag) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestKafkaMaxLag) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestKafkaMaxLag(cfg MetricConfig) metricApachedruidIngestKafkaMaxLag { + m := metricApachedruidIngestKafkaMaxLag{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestKafkaPartitionLag struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.kafka.partition_lag metric with initial data. +func (m *metricApachedruidIngestKafkaPartitionLag) init() { + m.data.SetName("apachedruid.ingest.kafka.partition_lag") + m.data.SetDescription("Partition-wise lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers. Minimum emission period for this metric is a minute.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestKafkaPartitionLag) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestPartitionAttributeValue string, ingestStreamAttributeValue string, ingestDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("tags", ingestTagsAttributeValue) + dp.Attributes().PutStr("partition", ingestPartitionAttributeValue) + dp.Attributes().PutStr("stream", ingestStreamAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIngestKafkaPartitionLag) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
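Taken together, init, recordDataPoint, updateCapacity, and emit form a small per-metric builder lifecycle: data points accumulate while the metric is enabled, and emit moves the finished metric into the caller's pmetric.MetricSlice and re-initializes the internal buffer for the next collection cycle. A minimal, purely illustrative sketch of a scraping caller, assuming it sits in the same internal/metadata package; the function name, timestamps, lag value, and attribute values below are hypothetical and not part of the generated file:

func exampleRecordKafkaPartitionLag(start, now pcommon.Timestamp, lag int64, tags, partition, stream, dataSource string, out pmetric.MetricSlice) {
	// construct an enabled builder, record one attributed data point, then flush it
	m := newMetricApachedruidIngestKafkaPartitionLag(MetricConfig{Enabled: true})
	m.recordDataPoint(start, now, lag, tags, partition, stream, dataSource)
	m.emit(out) // appends the gauge to out and resets the builder for the next cycle
}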
+func (m *metricApachedruidIngestKafkaPartitionLag) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestKafkaPartitionLag(cfg MetricConfig) metricApachedruidIngestKafkaPartitionLag { + m := metricApachedruidIngestKafkaPartitionLag{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestKinesisAvgLagTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.kinesis.avg_lag.time metric with initial data. +func (m *metricApachedruidIngestKinesisAvgLagTime) init() { + m.data.SetName("apachedruid.ingest.kinesis.avg_lag.time") + m.data.SetDescription("Average lag time in milliseconds between the current message sequence number consumed by the Kinesis indexing tasks and latest sequence number in Kinesis across all shards. Minimum emission period for this metric is a minute.") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestKinesisAvgLagTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestStreamAttributeValue string, ingestDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("tags", ingestTagsAttributeValue) + dp.Attributes().PutStr("stream", ingestStreamAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIngestKinesisAvgLagTime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestKinesisAvgLagTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestKinesisAvgLagTime(cfg MetricConfig) metricApachedruidIngestKinesisAvgLagTime { + m := metricApachedruidIngestKinesisAvgLagTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestKinesisLagTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.kinesis.lag.time metric with initial data. +func (m *metricApachedruidIngestKinesisLagTime) init() { + m.data.SetName("apachedruid.ingest.kinesis.lag.time") + m.data.SetDescription("Total lag time in milliseconds between the current message sequence number consumed by the Kinesis indexing tasks and latest sequence number in Kinesis across all shards. 
Minimum emission period for this metric is a minute.") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestKinesisLagTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestStreamAttributeValue string, ingestDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("tags", ingestTagsAttributeValue) + dp.Attributes().PutStr("stream", ingestStreamAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIngestKinesisLagTime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestKinesisLagTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestKinesisLagTime(cfg MetricConfig) metricApachedruidIngestKinesisLagTime { + m := metricApachedruidIngestKinesisLagTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestKinesisMaxLagTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.kinesis.max_lag.time metric with initial data. +func (m *metricApachedruidIngestKinesisMaxLagTime) init() { + m.data.SetName("apachedruid.ingest.kinesis.max_lag.time") + m.data.SetDescription("Max lag time in milliseconds between the current message sequence number consumed by the Kinesis indexing tasks and latest sequence number in Kinesis across all shards. Minimum emission period for this metric is a minute.") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestKinesisMaxLagTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestStreamAttributeValue string, ingestDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("tags", ingestTagsAttributeValue) + dp.Attributes().PutStr("stream", ingestStreamAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIngestKinesisMaxLagTime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricApachedruidIngestKinesisMaxLagTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestKinesisMaxLagTime(cfg MetricConfig) metricApachedruidIngestKinesisMaxLagTime { + m := metricApachedruidIngestKinesisMaxLagTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestKinesisPartitionLagTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.kinesis.partition_lag.time metric with initial data. +func (m *metricApachedruidIngestKinesisPartitionLagTime) init() { + m.data.SetName("apachedruid.ingest.kinesis.partition_lag.time") + m.data.SetDescription("Partition-wise lag time in milliseconds between the current message sequence number consumed by the Kinesis indexing tasks and latest sequence number in Kinesis. Minimum emission period for this metric is a minute.") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestKinesisPartitionLagTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestPartitionAttributeValue string, ingestStreamAttributeValue string, ingestDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("tags", ingestTagsAttributeValue) + dp.Attributes().PutStr("partition", ingestPartitionAttributeValue) + dp.Attributes().PutStr("stream", ingestStreamAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIngestKinesisPartitionLagTime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestKinesisPartitionLagTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestKinesisPartitionLagTime(cfg MetricConfig) metricApachedruidIngestKinesisPartitionLagTime { + m := metricApachedruidIngestKinesisPartitionLagTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestMergeCPU struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.merge.cpu metric with initial data. 
+func (m *metricApachedruidIngestMergeCPU) init() { + m.data.SetName("apachedruid.ingest.merge.cpu") + m.data.SetDescription("CPU time in Nanoseconds spent on merging intermediate segments.") + m.data.SetUnit("ns") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestMergeCPU) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) + dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) + dp.Attributes().PutStr("tags", ingestTagsAttributeValue) + dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIngestMergeCPU) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestMergeCPU) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestMergeCPU(cfg MetricConfig) metricApachedruidIngestMergeCPU { + m := metricApachedruidIngestMergeCPU{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestMergeTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.merge.time metric with initial data. 
+func (m *metricApachedruidIngestMergeTime) init() { + m.data.SetName("apachedruid.ingest.merge.time") + m.data.SetDescription("Milliseconds spent merging intermediate segments.") + m.data.SetUnit("ms") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestMergeTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) + dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) + dp.Attributes().PutStr("tags", ingestTagsAttributeValue) + dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIngestMergeTime) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestMergeTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestMergeTime(cfg MetricConfig) metricApachedruidIngestMergeTime { + m := metricApachedruidIngestMergeTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestNoticesQueueSize struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.notices.queue_size metric with initial data. +func (m *metricApachedruidIngestNoticesQueueSize) init() { + m.data.SetName("apachedruid.ingest.notices.queue_size") + m.data.SetDescription("Number of pending notices to be processed by the coordinator.") + m.data.SetUnit("{notices}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestNoticesQueueSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("tags", ingestTagsAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricApachedruidIngestNoticesQueueSize) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestNoticesQueueSize) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestNoticesQueueSize(cfg MetricConfig) metricApachedruidIngestNoticesQueueSize { + m := metricApachedruidIngestNoticesQueueSize{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestNoticesTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.notices.time metric with initial data. +func (m *metricApachedruidIngestNoticesTime) init() { + m.data.SetName("apachedruid.ingest.notices.time") + m.data.SetDescription("Milliseconds taken to process a notice by the supervisor.") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestNoticesTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("tags", ingestTagsAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIngestNoticesTime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestNoticesTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestNoticesTime(cfg MetricConfig) metricApachedruidIngestNoticesTime { + m := metricApachedruidIngestNoticesTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestPauseTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.pause.time metric with initial data. 
+func (m *metricApachedruidIngestPauseTime) init() { + m.data.SetName("apachedruid.ingest.pause.time") + m.data.SetDescription("Milliseconds spent by a task in a paused state without ingesting.") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestPauseTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string, ingestDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("tags", ingestTagsAttributeValue) + dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIngestPauseTime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestPauseTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestPauseTime(cfg MetricConfig) metricApachedruidIngestPauseTime { + m := metricApachedruidIngestPauseTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestPersistsBackPressure struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.persists.back_pressure metric with initial data. +func (m *metricApachedruidIngestPersistsBackPressure) init() { + m.data.SetName("apachedruid.ingest.persists.back_pressure") + m.data.SetDescription("Milliseconds spent creating persist tasks and blocking waiting for them to finish.") + m.data.SetUnit("ms") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestPersistsBackPressure) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) + dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) + dp.Attributes().PutStr("tags", ingestTagsAttributeValue) + dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricApachedruidIngestPersistsBackPressure) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestPersistsBackPressure) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestPersistsBackPressure(cfg MetricConfig) metricApachedruidIngestPersistsBackPressure { + m := metricApachedruidIngestPersistsBackPressure{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestPersistsCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.persists.count metric with initial data. +func (m *metricApachedruidIngestPersistsCount) init() { + m.data.SetName("apachedruid.ingest.persists.count") + m.data.SetDescription("Number of times persist occurred.") + m.data.SetUnit("1") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestPersistsCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) + dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) + dp.Attributes().PutStr("tags", ingestTagsAttributeValue) + dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIngestPersistsCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestPersistsCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestPersistsCount(cfg MetricConfig) metricApachedruidIngestPersistsCount { + m := metricApachedruidIngestPersistsCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestPersistsCPU struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.persists.cpu metric with initial data. 
+func (m *metricApachedruidIngestPersistsCPU) init() { + m.data.SetName("apachedruid.ingest.persists.cpu") + m.data.SetDescription("CPU time in nanoseconds spent on doing intermediate persist.") + m.data.SetUnit("ns") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestPersistsCPU) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) + dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) + dp.Attributes().PutStr("tags", ingestTagsAttributeValue) + dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIngestPersistsCPU) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestPersistsCPU) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestPersistsCPU(cfg MetricConfig) metricApachedruidIngestPersistsCPU { + m := metricApachedruidIngestPersistsCPU{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestPersistsFailed struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.persists.failed metric with initial data. 
+func (m *metricApachedruidIngestPersistsFailed) init() { + m.data.SetName("apachedruid.ingest.persists.failed") + m.data.SetDescription("Number of persists that failed.") + m.data.SetUnit("{persists}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestPersistsFailed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) + dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) + dp.Attributes().PutStr("tags", ingestTagsAttributeValue) + dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIngestPersistsFailed) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestPersistsFailed) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestPersistsFailed(cfg MetricConfig) metricApachedruidIngestPersistsFailed { + m := metricApachedruidIngestPersistsFailed{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestPersistsTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.persists.time metric with initial data. 
+func (m *metricApachedruidIngestPersistsTime) init() { + m.data.SetName("apachedruid.ingest.persists.time") + m.data.SetDescription("Milliseconds spent doing intermediate persist.") + m.data.SetUnit("ms") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestPersistsTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) + dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) + dp.Attributes().PutStr("tags", ingestTagsAttributeValue) + dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIngestPersistsTime) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestPersistsTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestPersistsTime(cfg MetricConfig) metricApachedruidIngestPersistsTime { + m := metricApachedruidIngestPersistsTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestRowsOutput struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.rows.output metric with initial data. 
+func (m *metricApachedruidIngestRowsOutput) init() { + m.data.SetName("apachedruid.ingest.rows.output") + m.data.SetDescription("Number of Druid rows persisted.") + m.data.SetUnit("{rows}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestRowsOutput) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestTaskIDAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) + dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) + dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIngestRowsOutput) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestRowsOutput) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestRowsOutput(cfg MetricConfig) metricApachedruidIngestRowsOutput { + m := metricApachedruidIngestRowsOutput{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestSegmentsCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.segments.count metric with initial data. 
+func (m *metricApachedruidIngestSegmentsCount) init() { + m.data.SetName("apachedruid.ingest.segments.count") + m.data.SetDescription("Count of final segments created by job (includes tombstones).") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestSegmentsCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string, ingestTaskIngestionModeAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) + dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) + dp.Attributes().PutStr("tags", ingestTagsAttributeValue) + dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) + dp.Attributes().PutStr("task_ingestion_mode", ingestTaskIngestionModeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIngestSegmentsCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestSegmentsCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestSegmentsCount(cfg MetricConfig) metricApachedruidIngestSegmentsCount { + m := metricApachedruidIngestSegmentsCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestShuffleBytes struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.shuffle.bytes metric with initial data. +func (m *metricApachedruidIngestShuffleBytes) init() { + m.data.SetName("apachedruid.ingest.shuffle.bytes") + m.data.SetDescription("Number of bytes shuffled per emission period.") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestShuffleBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestSupervisorTaskIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("supervisor_task_id", ingestSupervisorTaskIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricApachedruidIngestShuffleBytes) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestShuffleBytes) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestShuffleBytes(cfg MetricConfig) metricApachedruidIngestShuffleBytes { + m := metricApachedruidIngestShuffleBytes{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestShuffleRequests struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.shuffle.requests metric with initial data. +func (m *metricApachedruidIngestShuffleRequests) init() { + m.data.SetName("apachedruid.ingest.shuffle.requests") + m.data.SetDescription("Number of shuffle requests per emission period.") + m.data.SetUnit("{requests}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestShuffleRequests) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestSupervisorTaskIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("supervisor_task_id", ingestSupervisorTaskIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIngestShuffleRequests) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestShuffleRequests) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestShuffleRequests(cfg MetricConfig) metricApachedruidIngestShuffleRequests { + m := metricApachedruidIngestShuffleRequests{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestSinkCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.sink.count metric with initial data. 
+func (m *metricApachedruidIngestSinkCount) init() { + m.data.SetName("apachedruid.ingest.sink.count") + m.data.SetDescription("Number of sinks not handed off.") + m.data.SetUnit("{sinks}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestSinkCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) + dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) + dp.Attributes().PutStr("tags", ingestTagsAttributeValue) + dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIngestSinkCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestSinkCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestSinkCount(cfg MetricConfig) metricApachedruidIngestSinkCount { + m := metricApachedruidIngestSinkCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIngestTombstonesCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.ingest.tombstones.count metric with initial data. 
+func (m *metricApachedruidIngestTombstonesCount) init() { + m.data.SetName("apachedruid.ingest.tombstones.count") + m.data.SetDescription("Count of tombstones created by job.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIngestTombstonesCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string, ingestTaskIngestionModeAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) + dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) + dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) + dp.Attributes().PutStr("tags", ingestTagsAttributeValue) + dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) + dp.Attributes().PutStr("task_ingestion_mode", ingestTaskIngestionModeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIngestTombstonesCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIngestTombstonesCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIngestTombstonesCount(cfg MetricConfig) metricApachedruidIngestTombstonesCount { + m := metricApachedruidIngestTombstonesCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIntervalCompactedCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.interval.compacted.count metric with initial data. +func (m *metricApachedruidIntervalCompactedCount) init() { + m.data.SetName("apachedruid.interval.compacted.count") + m.data.SetDescription("Total number of intervals of this datasource that are already compacted with the spec set in the auto compaction config.") + m.data.SetUnit("{intervals}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIntervalCompactedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, intervalDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("data_source", intervalDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricApachedruidIntervalCompactedCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIntervalCompactedCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIntervalCompactedCount(cfg MetricConfig) metricApachedruidIntervalCompactedCount { + m := metricApachedruidIntervalCompactedCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIntervalSkipCompactCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.interval.skip_compact.count metric with initial data. +func (m *metricApachedruidIntervalSkipCompactCount) init() { + m.data.SetName("apachedruid.interval.skip_compact.count") + m.data.SetDescription("Total number of intervals of this datasource that are skipped (not eligible for auto compaction) by the auto compaction.") + m.data.SetUnit("{intervals}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIntervalSkipCompactCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, intervalDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("data_source", intervalDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIntervalSkipCompactCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIntervalSkipCompactCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIntervalSkipCompactCount(cfg MetricConfig) metricApachedruidIntervalSkipCompactCount { + m := metricApachedruidIntervalSkipCompactCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidIntervalWaitCompactCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.interval.wait_compact.count metric with initial data. 
+func (m *metricApachedruidIntervalWaitCompactCount) init() { + m.data.SetName("apachedruid.interval.wait_compact.count") + m.data.SetDescription("Total number of intervals of this datasource waiting to be compacted by the auto compaction (only consider intervals/segments that are eligible for auto compaction).") + m.data.SetUnit("{intervals}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidIntervalWaitCompactCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, intervalDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("data_source", intervalDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidIntervalWaitCompactCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidIntervalWaitCompactCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidIntervalWaitCompactCount(cfg MetricConfig) metricApachedruidIntervalWaitCompactCount { + m := metricApachedruidIntervalWaitCompactCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidJettyNumOpenConnections struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.jetty.num_open_connections metric with initial data. +func (m *metricApachedruidJettyNumOpenConnections) init() { + m.data.SetName("apachedruid.jetty.num_open_connections") + m.data.SetDescription("Number of open jetty connections.") + m.data.SetUnit("{connections}") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidJettyNumOpenConnections) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidJettyNumOpenConnections) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricApachedruidJettyNumOpenConnections) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidJettyNumOpenConnections(cfg MetricConfig) metricApachedruidJettyNumOpenConnections { + m := metricApachedruidJettyNumOpenConnections{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidJettyThreadPoolBusy struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.jetty.thread_pool.busy metric with initial data. +func (m *metricApachedruidJettyThreadPoolBusy) init() { + m.data.SetName("apachedruid.jetty.thread_pool.busy") + m.data.SetDescription("Number of busy threads that has work to do from the worker queue.") + m.data.SetUnit("{threads}") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidJettyThreadPoolBusy) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidJettyThreadPoolBusy) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidJettyThreadPoolBusy) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidJettyThreadPoolBusy(cfg MetricConfig) metricApachedruidJettyThreadPoolBusy { + m := metricApachedruidJettyThreadPoolBusy{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidJettyThreadPoolIdle struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.jetty.thread_pool.idle metric with initial data. +func (m *metricApachedruidJettyThreadPoolIdle) init() { + m.data.SetName("apachedruid.jetty.thread_pool.idle") + m.data.SetDescription("Number of idle threads.") + m.data.SetUnit("{threads}") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidJettyThreadPoolIdle) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidJettyThreadPoolIdle) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricApachedruidJettyThreadPoolIdle) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidJettyThreadPoolIdle(cfg MetricConfig) metricApachedruidJettyThreadPoolIdle { + m := metricApachedruidJettyThreadPoolIdle{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidJettyThreadPoolIsLowOnThreads struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.jetty.thread_pool.is_low_on_threads metric with initial data. +func (m *metricApachedruidJettyThreadPoolIsLowOnThreads) init() { + m.data.SetName("apachedruid.jetty.thread_pool.is_low_on_threads") + m.data.SetDescription("A rough indicator of whether number of total workable threads allocated is enough to handle the works in the work queue.") + m.data.SetUnit("{threads}") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidJettyThreadPoolIsLowOnThreads) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidJettyThreadPoolIsLowOnThreads) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidJettyThreadPoolIsLowOnThreads) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidJettyThreadPoolIsLowOnThreads(cfg MetricConfig) metricApachedruidJettyThreadPoolIsLowOnThreads { + m := metricApachedruidJettyThreadPoolIsLowOnThreads{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidJettyThreadPoolMax struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.jetty.thread_pool.max metric with initial data. +func (m *metricApachedruidJettyThreadPoolMax) init() { + m.data.SetName("apachedruid.jetty.thread_pool.max") + m.data.SetDescription("Number of maximum threads allocatable.") + m.data.SetUnit("{threads}") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidJettyThreadPoolMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricApachedruidJettyThreadPoolMax) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidJettyThreadPoolMax) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidJettyThreadPoolMax(cfg MetricConfig) metricApachedruidJettyThreadPoolMax { + m := metricApachedruidJettyThreadPoolMax{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidJettyThreadPoolMin struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.jetty.thread_pool.min metric with initial data. +func (m *metricApachedruidJettyThreadPoolMin) init() { + m.data.SetName("apachedruid.jetty.thread_pool.min") + m.data.SetDescription("Number of minimum threads allocatable.") + m.data.SetUnit("{threads}") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidJettyThreadPoolMin) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidJettyThreadPoolMin) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidJettyThreadPoolMin) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidJettyThreadPoolMin(cfg MetricConfig) metricApachedruidJettyThreadPoolMin { + m := metricApachedruidJettyThreadPoolMin{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidJettyThreadPoolQueueSize struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.jetty.thread_pool.queue_size metric with initial data. +func (m *metricApachedruidJettyThreadPoolQueueSize) init() { + m.data.SetName("apachedruid.jetty.thread_pool.queue_size") + m.data.SetDescription("Size of the worker queue.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidJettyThreadPoolQueueSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricApachedruidJettyThreadPoolQueueSize) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidJettyThreadPoolQueueSize) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidJettyThreadPoolQueueSize(cfg MetricConfig) metricApachedruidJettyThreadPoolQueueSize { + m := metricApachedruidJettyThreadPoolQueueSize{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidJettyThreadPoolTotal struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.jetty.thread_pool.total metric with initial data. +func (m *metricApachedruidJettyThreadPoolTotal) init() { + m.data.SetName("apachedruid.jetty.thread_pool.total") + m.data.SetDescription("Number of total workable threads allocated.") + m.data.SetUnit("{threads}") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidJettyThreadPoolTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidJettyThreadPoolTotal) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidJettyThreadPoolTotal) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidJettyThreadPoolTotal(cfg MetricConfig) metricApachedruidJettyThreadPoolTotal { + m := metricApachedruidJettyThreadPoolTotal{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidJvmBufferpoolCapacity struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.jvm.bufferpool.capacity metric with initial data. 
+func (m *metricApachedruidJvmBufferpoolCapacity) init() { + m.data.SetName("apachedruid.jvm.bufferpool.capacity") + m.data.SetDescription("Bufferpool capacity.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidJvmBufferpoolCapacity) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, jvmBufferpoolNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("bufferpool_name", jvmBufferpoolNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidJvmBufferpoolCapacity) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidJvmBufferpoolCapacity) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidJvmBufferpoolCapacity(cfg MetricConfig) metricApachedruidJvmBufferpoolCapacity { + m := metricApachedruidJvmBufferpoolCapacity{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidJvmBufferpoolCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.jvm.bufferpool.count metric with initial data. +func (m *metricApachedruidJvmBufferpoolCount) init() { + m.data.SetName("apachedruid.jvm.bufferpool.count") + m.data.SetDescription("Bufferpool count.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidJvmBufferpoolCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, jvmBufferpoolNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("bufferpool_name", jvmBufferpoolNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidJvmBufferpoolCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidJvmBufferpoolCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidJvmBufferpoolCount(cfg MetricConfig) metricApachedruidJvmBufferpoolCount { + m := metricApachedruidJvmBufferpoolCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidJvmBufferpoolUsed struct { + data pmetric.Metric // data buffer for generated metric. 
+ config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.jvm.bufferpool.used metric with initial data. +func (m *metricApachedruidJvmBufferpoolUsed) init() { + m.data.SetName("apachedruid.jvm.bufferpool.used") + m.data.SetDescription("Bufferpool used.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidJvmBufferpoolUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, jvmBufferpoolNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("bufferpool_name", jvmBufferpoolNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidJvmBufferpoolUsed) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidJvmBufferpoolUsed) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidJvmBufferpoolUsed(cfg MetricConfig) metricApachedruidJvmBufferpoolUsed { + m := metricApachedruidJvmBufferpoolUsed{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidJvmGcCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.jvm.gc.count metric with initial data. +func (m *metricApachedruidJvmGcCount) init() { + m.data.SetName("apachedruid.jvm.gc.count") + m.data.SetDescription("Garbage collection count.") + m.data.SetUnit("1") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidJvmGcCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, jvmGcGenAttributeValue string, jvmGcNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("gc_gen", jvmGcGenAttributeValue) + dp.Attributes().PutStr("gc_name", jvmGcNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidJvmGcCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricApachedruidJvmGcCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidJvmGcCount(cfg MetricConfig) metricApachedruidJvmGcCount { + m := metricApachedruidJvmGcCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidJvmGcCPU struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.jvm.gc.cpu metric with initial data. +func (m *metricApachedruidJvmGcCPU) init() { + m.data.SetName("apachedruid.jvm.gc.cpu") + m.data.SetDescription("Count of CPU time in Nanoseconds spent on garbage collection. Note, `jvm/gc/cpu` represents the total time over multiple GC cycles; divide by `jvm/gc/count` to get the mean GC time per cycle.") + m.data.SetUnit("ns") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidJvmGcCPU) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, jvmGcGenAttributeValue string, jvmGcNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("gc_gen", jvmGcGenAttributeValue) + dp.Attributes().PutStr("gc_name", jvmGcNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidJvmGcCPU) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidJvmGcCPU) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidJvmGcCPU(cfg MetricConfig) metricApachedruidJvmGcCPU { + m := metricApachedruidJvmGcCPU{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidJvmMemCommitted struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.jvm.mem.committed metric with initial data. 
+func (m *metricApachedruidJvmMemCommitted) init() { + m.data.SetName("apachedruid.jvm.mem.committed") + m.data.SetDescription("Committed memory.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidJvmMemCommitted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, jvmMemKindAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("mem_kind", jvmMemKindAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidJvmMemCommitted) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidJvmMemCommitted) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidJvmMemCommitted(cfg MetricConfig) metricApachedruidJvmMemCommitted { + m := metricApachedruidJvmMemCommitted{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidJvmMemInit struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.jvm.mem.init metric with initial data. +func (m *metricApachedruidJvmMemInit) init() { + m.data.SetName("apachedruid.jvm.mem.init") + m.data.SetDescription("Initial memory.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidJvmMemInit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, jvmMemKindAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("mem_kind", jvmMemKindAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidJvmMemInit) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidJvmMemInit) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidJvmMemInit(cfg MetricConfig) metricApachedruidJvmMemInit { + m := metricApachedruidJvmMemInit{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidJvmMemMax struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.jvm.mem.max metric with initial data. 
+func (m *metricApachedruidJvmMemMax) init() { + m.data.SetName("apachedruid.jvm.mem.max") + m.data.SetDescription("Max memory.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidJvmMemMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, jvmMemKindAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("mem_kind", jvmMemKindAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidJvmMemMax) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidJvmMemMax) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidJvmMemMax(cfg MetricConfig) metricApachedruidJvmMemMax { + m := metricApachedruidJvmMemMax{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidJvmMemUsed struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.jvm.mem.used metric with initial data. +func (m *metricApachedruidJvmMemUsed) init() { + m.data.SetName("apachedruid.jvm.mem.used") + m.data.SetDescription("Used memory.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidJvmMemUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, jvmMemKindAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("mem_kind", jvmMemKindAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidJvmMemUsed) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidJvmMemUsed) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidJvmMemUsed(cfg MetricConfig) metricApachedruidJvmMemUsed { + m := metricApachedruidJvmMemUsed{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidJvmPoolCommitted struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.jvm.pool.committed metric with initial data. 
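+// The JVM pool metrics that follow attach two data point attributes, `pool_name` and
+// `pool_kind`, set through Attributes().PutStr in recordDataPoint; each recordDataPoint
+// call appends one data point, so one point is produced per observed attribute combination.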
+func (m *metricApachedruidJvmPoolCommitted) init() { + m.data.SetName("apachedruid.jvm.pool.committed") + m.data.SetDescription("Committed pool.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidJvmPoolCommitted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, jvmPoolNameAttributeValue string, jvmPoolKindAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("pool_name", jvmPoolNameAttributeValue) + dp.Attributes().PutStr("pool_kind", jvmPoolKindAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidJvmPoolCommitted) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidJvmPoolCommitted) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidJvmPoolCommitted(cfg MetricConfig) metricApachedruidJvmPoolCommitted { + m := metricApachedruidJvmPoolCommitted{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidJvmPoolInit struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.jvm.pool.init metric with initial data. +func (m *metricApachedruidJvmPoolInit) init() { + m.data.SetName("apachedruid.jvm.pool.init") + m.data.SetDescription("Initial pool.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidJvmPoolInit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, jvmPoolNameAttributeValue string, jvmPoolKindAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("pool_name", jvmPoolNameAttributeValue) + dp.Attributes().PutStr("pool_kind", jvmPoolKindAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidJvmPoolInit) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidJvmPoolInit) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidJvmPoolInit(cfg MetricConfig) metricApachedruidJvmPoolInit { + m := metricApachedruidJvmPoolInit{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidJvmPoolMax struct { + data pmetric.Metric // data buffer for generated metric. 
+ config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.jvm.pool.max metric with initial data. +func (m *metricApachedruidJvmPoolMax) init() { + m.data.SetName("apachedruid.jvm.pool.max") + m.data.SetDescription("Max pool.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidJvmPoolMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, jvmPoolNameAttributeValue string, jvmPoolKindAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("pool_name", jvmPoolNameAttributeValue) + dp.Attributes().PutStr("pool_kind", jvmPoolKindAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidJvmPoolMax) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidJvmPoolMax) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidJvmPoolMax(cfg MetricConfig) metricApachedruidJvmPoolMax { + m := metricApachedruidJvmPoolMax{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidJvmPoolUsed struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.jvm.pool.used metric with initial data. +func (m *metricApachedruidJvmPoolUsed) init() { + m.data.SetName("apachedruid.jvm.pool.used") + m.data.SetDescription("Pool used.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidJvmPoolUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, jvmPoolNameAttributeValue string, jvmPoolKindAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("pool_name", jvmPoolNameAttributeValue) + dp.Attributes().PutStr("pool_kind", jvmPoolKindAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidJvmPoolUsed) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
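+// emit is a no-op when the metric is disabled or when no data points have been recorded;
+// otherwise MoveTo transfers the buffered metric into the output slice and init rebuilds an
+// empty metric so the struct is ready for the next collection cycle.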
+func (m *metricApachedruidJvmPoolUsed) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidJvmPoolUsed(cfg MetricConfig) metricApachedruidJvmPoolUsed { + m := metricApachedruidJvmPoolUsed{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidKillPendingSegmentsCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.kill.pending_segments.count metric with initial data. +func (m *metricApachedruidKillPendingSegmentsCount) init() { + m.data.SetName("apachedruid.kill.pending_segments.count") + m.data.SetDescription("Number of stale pending segments deleted from the metadata store.") + m.data.SetUnit("{segments}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidKillPendingSegmentsCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, killDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("data_source", killDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidKillPendingSegmentsCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidKillPendingSegmentsCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidKillPendingSegmentsCount(cfg MetricConfig) metricApachedruidKillPendingSegmentsCount { + m := metricApachedruidKillPendingSegmentsCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidKillTaskCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.kill.task.count metric with initial data. +func (m *metricApachedruidKillTaskCount) init() { + m.data.SetName("apachedruid.kill.task.count") + m.data.SetDescription("Number of tasks issued in the auto kill run.") + m.data.SetUnit("{tasks}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) +} + +func (m *metricApachedruidKillTaskCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricApachedruidKillTaskCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidKillTaskCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidKillTaskCount(cfg MetricConfig) metricApachedruidKillTaskCount { + m := metricApachedruidKillTaskCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidKillTaskAvailableSlotCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.kill_task.available_slot.count metric with initial data. +func (m *metricApachedruidKillTaskAvailableSlotCount) init() { + m.data.SetName("apachedruid.kill_task.available_slot.count") + m.data.SetDescription("Number of available task slots that can be used for auto kill tasks in the auto kill run. This is the max number of task slots minus any currently running auto kill tasks.") + m.data.SetUnit("{slots}") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidKillTaskAvailableSlotCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidKillTaskAvailableSlotCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidKillTaskAvailableSlotCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidKillTaskAvailableSlotCount(cfg MetricConfig) metricApachedruidKillTaskAvailableSlotCount { + m := metricApachedruidKillTaskAvailableSlotCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidKillTaskMaxSlotCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.kill_task.max_slot.count metric with initial data. 
+func (m *metricApachedruidKillTaskMaxSlotCount) init() { + m.data.SetName("apachedruid.kill_task.max_slot.count") + m.data.SetDescription("Maximum number of task slots available for auto kill tasks in the auto kill run.") + m.data.SetUnit("{slots}") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidKillTaskMaxSlotCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidKillTaskMaxSlotCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidKillTaskMaxSlotCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidKillTaskMaxSlotCount(cfg MetricConfig) metricApachedruidKillTaskMaxSlotCount { + m := metricApachedruidKillTaskMaxSlotCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidMergeBufferPendingRequests struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.merge_buffer.pending_requests metric with initial data. +func (m *metricApachedruidMergeBufferPendingRequests) init() { + m.data.SetName("apachedruid.merge_buffer.pending_requests") + m.data.SetDescription("Number of requests waiting to acquire a batch of buffers from the merge buffer pool.") + m.data.SetUnit("{requests}") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidMergeBufferPendingRequests) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidMergeBufferPendingRequests) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidMergeBufferPendingRequests) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidMergeBufferPendingRequests(cfg MetricConfig) metricApachedruidMergeBufferPendingRequests { + m := metricApachedruidMergeBufferPendingRequests{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidMetadataKillAuditCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. 
+} + +// init fills apachedruid.metadata.kill.audit.count metric with initial data. +func (m *metricApachedruidMetadataKillAuditCount) init() { + m.data.SetName("apachedruid.metadata.kill.audit.count") + m.data.SetDescription("Total number of audit logs that were automatically deleted from metadata store per each Coordinator kill audit duty run. This metric can help adjust `druid.coordinator.kill.audit.durationToRetain` configuration based on whether more or less audit logs need to be deleted per cycle. This metric is emitted only when `druid.coordinator.kill.audit.on` is set to true.") + m.data.SetUnit("1") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) +} + +func (m *metricApachedruidMetadataKillAuditCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidMetadataKillAuditCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidMetadataKillAuditCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidMetadataKillAuditCount(cfg MetricConfig) metricApachedruidMetadataKillAuditCount { + m := metricApachedruidMetadataKillAuditCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidMetadataKillCompactionCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.metadata.kill.compaction.count metric with initial data. +func (m *metricApachedruidMetadataKillCompactionCount) init() { + m.data.SetName("apachedruid.metadata.kill.compaction.count") + m.data.SetDescription("Total number of compaction configurations that were automatically deleted from metadata store per each Coordinator kill compaction configuration duty run. This metric is only emitted when `druid.coordinator.kill.compaction.on` is set to true.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidMetadataKillCompactionCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidMetadataKillCompactionCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
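+// Both recordDataPoint and emit check m.config.Enabled first, so a metric disabled in the
+// receiver configuration records nothing and never appears in the emitted output.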
+func (m *metricApachedruidMetadataKillCompactionCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidMetadataKillCompactionCount(cfg MetricConfig) metricApachedruidMetadataKillCompactionCount { + m := metricApachedruidMetadataKillCompactionCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidMetadataKillDatasourceCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.metadata.kill.datasource.count metric with initial data. +func (m *metricApachedruidMetadataKillDatasourceCount) init() { + m.data.SetName("apachedruid.metadata.kill.datasource.count") + m.data.SetDescription("Total number of datasource metadata that were automatically deleted from metadata store per each Coordinator kill datasource duty run. Note that datasource metadata only exists for datasource created from supervisor. This metric can help adjust `druid.coordinator.kill.datasource.durationToRetain` configuration based on whether more or less datasource metadata need to be deleted per cycle. This metric is only emitted when `druid.coordinator.kill.datasource.on` is set to true.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidMetadataKillDatasourceCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidMetadataKillDatasourceCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidMetadataKillDatasourceCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidMetadataKillDatasourceCount(cfg MetricConfig) metricApachedruidMetadataKillDatasourceCount { + m := metricApachedruidMetadataKillDatasourceCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidMetadataKillRuleCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.metadata.kill.rule.count metric with initial data. +func (m *metricApachedruidMetadataKillRuleCount) init() { + m.data.SetName("apachedruid.metadata.kill.rule.count") + m.data.SetDescription("Total number of rules that were automatically deleted from metadata store per each Coordinator kill rule duty run. This metric can help adjust `druid.coordinator.kill.rule.durationToRetain` configuration based on whether more or less rules need to be deleted per cycle. 
This metric is only emitted when `druid.coordinator.kill.rule.on` is set to true.") + m.data.SetUnit("{rules}") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidMetadataKillRuleCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidMetadataKillRuleCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidMetadataKillRuleCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidMetadataKillRuleCount(cfg MetricConfig) metricApachedruidMetadataKillRuleCount { + m := metricApachedruidMetadataKillRuleCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidMetadataKillSupervisorCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.metadata.kill.supervisor.count metric with initial data. +func (m *metricApachedruidMetadataKillSupervisorCount) init() { + m.data.SetName("apachedruid.metadata.kill.supervisor.count") + m.data.SetDescription("Total number of terminated supervisors that were automatically deleted from metadata store per each Coordinator kill supervisor duty run. This metric can help adjust `druid.coordinator.kill.supervisor.durationToRetain` configuration based on whether more or less terminated supervisors need to be deleted per cycle. This metric is only emitted when `druid.coordinator.kill.supervisor.on` is set to true.") + m.data.SetUnit("{supervisors}") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidMetadataKillSupervisorCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidMetadataKillSupervisorCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricApachedruidMetadataKillSupervisorCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidMetadataKillSupervisorCount(cfg MetricConfig) metricApachedruidMetadataKillSupervisorCount { + m := metricApachedruidMetadataKillSupervisorCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidMetadatacacheInitTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.metadatacache.init.time metric with initial data. +func (m *metricApachedruidMetadatacacheInitTime) init() { + m.data.SetName("apachedruid.metadatacache.init.time") + m.data.SetDescription("Time taken to initialize the broker segment metadata cache. Useful to detect if brokers are taking too long to start.") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidMetadatacacheInitTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidMetadatacacheInitTime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidMetadatacacheInitTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidMetadatacacheInitTime(cfg MetricConfig) metricApachedruidMetadatacacheInitTime { + m := metricApachedruidMetadatacacheInitTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidMetadatacacheRefreshCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.metadatacache.refresh.count metric with initial data. +func (m *metricApachedruidMetadatacacheRefreshCount) init() { + m.data.SetName("apachedruid.metadatacache.refresh.count") + m.data.SetDescription("Number of segments to refresh in broker segment metadata cache.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidMetadatacacheRefreshCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
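+// The saved high-water mark is what init feeds to EnsureCapacity on metrics that pre-size
+// their data point slice, so steady-state scrapes avoid growing the slice repeatedly.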
+func (m *metricApachedruidMetadatacacheRefreshCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidMetadatacacheRefreshCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidMetadatacacheRefreshCount(cfg MetricConfig) metricApachedruidMetadatacacheRefreshCount { + m := metricApachedruidMetadatacacheRefreshCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidMetadatacacheRefreshTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.metadatacache.refresh.time metric with initial data. +func (m *metricApachedruidMetadatacacheRefreshTime) init() { + m.data.SetName("apachedruid.metadatacache.refresh.time") + m.data.SetDescription("Time taken to refresh segments in broker segment metadata cache.") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidMetadatacacheRefreshTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidMetadatacacheRefreshTime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidMetadatacacheRefreshTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidMetadatacacheRefreshTime(cfg MetricConfig) metricApachedruidMetadatacacheRefreshTime { + m := metricApachedruidMetadatacacheRefreshTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryByteLimitExceededCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.byte_limit.exceeded.count metric with initial data. 
+func (m *metricApachedruidQueryByteLimitExceededCount) init() { + m.data.SetName("apachedruid.query.byte_limit.exceeded.count") + m.data.SetDescription("Number of queries whose inlined subquery results exceeded the given byte limit.") + m.data.SetUnit("{queries}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) +} + +func (m *metricApachedruidQueryByteLimitExceededCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryByteLimitExceededCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryByteLimitExceededCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryByteLimitExceededCount(cfg MetricConfig) metricApachedruidQueryByteLimitExceededCount { + m := metricApachedruidQueryByteLimitExceededCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryBytes struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.bytes metric with initial data. +func (m *metricApachedruidQueryBytes) init() { + m.data.SetName("apachedruid.query.bytes") + m.data.SetDescription("The total number of bytes returned to the requesting client in the query response from the broker. 
Other services report the total bytes for their portion of the query.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidQueryBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, queryDataSourceAttributeValue string, queryNumMetricsAttributeValue string, queryDimensionAttributeValue string, queryHasFiltersAttributeValue string, queryThresholdAttributeValue int64, queryNumComplexMetricsAttributeValue int64, queryTypeAttributeValue string, queryRemoteAddressAttributeValue string, queryIDAttributeValue string, queryContextAttributeValue string, queryNumDimensionsAttributeValue string, queryIntervalAttributeValue string, queryDurationAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("data_source", queryDataSourceAttributeValue) + dp.Attributes().PutStr("num_metrics", queryNumMetricsAttributeValue) + dp.Attributes().PutStr("dimension", queryDimensionAttributeValue) + dp.Attributes().PutStr("has_filters", queryHasFiltersAttributeValue) + dp.Attributes().PutInt("threshold", queryThresholdAttributeValue) + dp.Attributes().PutInt("num_complex_metrics", queryNumComplexMetricsAttributeValue) + dp.Attributes().PutStr("type", queryTypeAttributeValue) + dp.Attributes().PutStr("remote_address", queryRemoteAddressAttributeValue) + dp.Attributes().PutStr("id", queryIDAttributeValue) + dp.Attributes().PutStr("context", queryContextAttributeValue) + dp.Attributes().PutStr("num_dimensions", queryNumDimensionsAttributeValue) + dp.Attributes().PutStr("interval", queryIntervalAttributeValue) + dp.Attributes().PutStr("duration", queryDurationAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryBytes) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryBytes) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryBytes(cfg MetricConfig) metricApachedruidQueryBytes { + m := metricApachedruidQueryBytes{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryCacheDeltaAverageBytes struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.cache.delta.average_bytes metric with initial data. 
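+// Unlike the cache metrics in this block, apachedruid.query.bytes above carries a per-query
+// attribute set (data_source, type, id, remote_address, interval, and so on), so each
+// distinct combination of those values produces its own data point.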
+func (m *metricApachedruidQueryCacheDeltaAverageBytes) init() { + m.data.SetName("apachedruid.query.cache.delta.average_bytes") + m.data.SetDescription("Average cache entry byte size.") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) +} + +func (m *metricApachedruidQueryCacheDeltaAverageBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryCacheDeltaAverageBytes) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryCacheDeltaAverageBytes) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryCacheDeltaAverageBytes(cfg MetricConfig) metricApachedruidQueryCacheDeltaAverageBytes { + m := metricApachedruidQueryCacheDeltaAverageBytes{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryCacheDeltaErrors struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.cache.delta.errors metric with initial data. +func (m *metricApachedruidQueryCacheDeltaErrors) init() { + m.data.SetName("apachedruid.query.cache.delta.errors") + m.data.SetDescription("Number of cache errors.") + m.data.SetUnit("{errors}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) +} + +func (m *metricApachedruidQueryCacheDeltaErrors) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryCacheDeltaErrors) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryCacheDeltaErrors) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryCacheDeltaErrors(cfg MetricConfig) metricApachedruidQueryCacheDeltaErrors { + m := metricApachedruidQueryCacheDeltaErrors{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryCacheDeltaEvictions struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. 
+ capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.cache.delta.evictions metric with initial data. +func (m *metricApachedruidQueryCacheDeltaEvictions) init() { + m.data.SetName("apachedruid.query.cache.delta.evictions") + m.data.SetDescription("Number of cache evictions.") + m.data.SetUnit("{evictions}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) +} + +func (m *metricApachedruidQueryCacheDeltaEvictions) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryCacheDeltaEvictions) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryCacheDeltaEvictions) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryCacheDeltaEvictions(cfg MetricConfig) metricApachedruidQueryCacheDeltaEvictions { + m := metricApachedruidQueryCacheDeltaEvictions{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryCacheDeltaHitRate struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.cache.delta.hit_rate metric with initial data. +func (m *metricApachedruidQueryCacheDeltaHitRate) init() { + m.data.SetName("apachedruid.query.cache.delta.hit_rate") + m.data.SetDescription("Cache hit rate.") + m.data.SetUnit("1") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) +} + +func (m *metricApachedruidQueryCacheDeltaHitRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryCacheDeltaHitRate) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
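+// apachedruid.query.cache.delta.hit_rate is recorded as a double (SetDoubleValue) rather
+// than the int64 used by the surrounding cache counters.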
+func (m *metricApachedruidQueryCacheDeltaHitRate) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryCacheDeltaHitRate(cfg MetricConfig) metricApachedruidQueryCacheDeltaHitRate { + m := metricApachedruidQueryCacheDeltaHitRate{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryCacheDeltaHits struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.cache.delta.hits metric with initial data. +func (m *metricApachedruidQueryCacheDeltaHits) init() { + m.data.SetName("apachedruid.query.cache.delta.hits") + m.data.SetDescription("Number of cache hits.") + m.data.SetUnit("{hits}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) +} + +func (m *metricApachedruidQueryCacheDeltaHits) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryCacheDeltaHits) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryCacheDeltaHits) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryCacheDeltaHits(cfg MetricConfig) metricApachedruidQueryCacheDeltaHits { + m := metricApachedruidQueryCacheDeltaHits{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryCacheDeltaMisses struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.cache.delta.misses metric with initial data. +func (m *metricApachedruidQueryCacheDeltaMisses) init() { + m.data.SetName("apachedruid.query.cache.delta.misses") + m.data.SetDescription("Number of cache misses.") + m.data.SetUnit("{misses}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) +} + +func (m *metricApachedruidQueryCacheDeltaMisses) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricApachedruidQueryCacheDeltaMisses) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryCacheDeltaMisses) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryCacheDeltaMisses(cfg MetricConfig) metricApachedruidQueryCacheDeltaMisses { + m := metricApachedruidQueryCacheDeltaMisses{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryCacheDeltaNumEntries struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.cache.delta.num_entries metric with initial data. +func (m *metricApachedruidQueryCacheDeltaNumEntries) init() { + m.data.SetName("apachedruid.query.cache.delta.num_entries") + m.data.SetDescription("Number of cache entries.") + m.data.SetUnit("{entries}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) +} + +func (m *metricApachedruidQueryCacheDeltaNumEntries) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryCacheDeltaNumEntries) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryCacheDeltaNumEntries) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryCacheDeltaNumEntries(cfg MetricConfig) metricApachedruidQueryCacheDeltaNumEntries { + m := metricApachedruidQueryCacheDeltaNumEntries{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryCacheDeltaPutError struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.cache.delta.put.error metric with initial data. 
+func (m *metricApachedruidQueryCacheDeltaPutError) init() { + m.data.SetName("apachedruid.query.cache.delta.put.error") + m.data.SetDescription("Number of new cache entries that could not be cached due to errors.") + m.data.SetUnit("{errors}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) +} + +func (m *metricApachedruidQueryCacheDeltaPutError) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryCacheDeltaPutError) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryCacheDeltaPutError) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryCacheDeltaPutError(cfg MetricConfig) metricApachedruidQueryCacheDeltaPutError { + m := metricApachedruidQueryCacheDeltaPutError{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryCacheDeltaPutOk struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.cache.delta.put.ok metric with initial data. +func (m *metricApachedruidQueryCacheDeltaPutOk) init() { + m.data.SetName("apachedruid.query.cache.delta.put.ok") + m.data.SetDescription("Number of new cache entries successfully cached.") + m.data.SetUnit("1") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) +} + +func (m *metricApachedruidQueryCacheDeltaPutOk) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryCacheDeltaPutOk) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryCacheDeltaPutOk) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryCacheDeltaPutOk(cfg MetricConfig) metricApachedruidQueryCacheDeltaPutOk { + m := metricApachedruidQueryCacheDeltaPutOk{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryCacheDeltaPutOversized struct { + data pmetric.Metric // data buffer for generated metric. 
+ config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.cache.delta.put.oversized metric with initial data. +func (m *metricApachedruidQueryCacheDeltaPutOversized) init() { + m.data.SetName("apachedruid.query.cache.delta.put.oversized") + m.data.SetDescription("Number of potential new cache entries that were skipped due to being too large (based on `druid.{broker,historical,realtime}.cache.maxEntrySize` properties).") + m.data.SetUnit("1") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) +} + +func (m *metricApachedruidQueryCacheDeltaPutOversized) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryCacheDeltaPutOversized) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryCacheDeltaPutOversized) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryCacheDeltaPutOversized(cfg MetricConfig) metricApachedruidQueryCacheDeltaPutOversized { + m := metricApachedruidQueryCacheDeltaPutOversized{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryCacheDeltaSizeBytes struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.cache.delta.size_bytes metric with initial data. +func (m *metricApachedruidQueryCacheDeltaSizeBytes) init() { + m.data.SetName("apachedruid.query.cache.delta.size_bytes") + m.data.SetDescription("Size in bytes of cache entries.") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) +} + +func (m *metricApachedruidQueryCacheDeltaSizeBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryCacheDeltaSizeBytes) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
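+// A minimal sketch of the record/emit lifecycle (cfg, start, now, and
+// scopeMetrics are illustrative names owned by the calling scraper):
+//
+//	m := newMetricApachedruidQueryCacheDeltaSizeBytes(cfg)
+//	m.recordDataPoint(start, now, 1024)
+//	m.emit(scopeMetrics.Metrics()) // MoveTo hands the point over, then init resets m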
+func (m *metricApachedruidQueryCacheDeltaSizeBytes) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryCacheDeltaSizeBytes(cfg MetricConfig) metricApachedruidQueryCacheDeltaSizeBytes { + m := metricApachedruidQueryCacheDeltaSizeBytes{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryCacheDeltaTimeouts struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.cache.delta.timeouts metric with initial data. +func (m *metricApachedruidQueryCacheDeltaTimeouts) init() { + m.data.SetName("apachedruid.query.cache.delta.timeouts") + m.data.SetDescription("Number of cache timeouts.") + m.data.SetUnit("{timeouts}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) +} + +func (m *metricApachedruidQueryCacheDeltaTimeouts) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryCacheDeltaTimeouts) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryCacheDeltaTimeouts) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryCacheDeltaTimeouts(cfg MetricConfig) metricApachedruidQueryCacheDeltaTimeouts { + m := metricApachedruidQueryCacheDeltaTimeouts{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryCacheMemcachedDelta struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.cache.memcached.delta metric with initial data. +func (m *metricApachedruidQueryCacheMemcachedDelta) init() { + m.data.SetName("apachedruid.query.cache.memcached.delta") + m.data.SetDescription("Cache metrics unique to memcached (only if `druid.cache.type=memcached`) as their delta from the prior event emission.") + m.data.SetUnit("1") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) +} + +func (m *metricApachedruidQueryCacheMemcachedDelta) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricApachedruidQueryCacheMemcachedDelta) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryCacheMemcachedDelta) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryCacheMemcachedDelta(cfg MetricConfig) metricApachedruidQueryCacheMemcachedDelta { + m := metricApachedruidQueryCacheMemcachedDelta{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryCacheMemcachedTotal struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.cache.memcached.total metric with initial data. +func (m *metricApachedruidQueryCacheMemcachedTotal) init() { + m.data.SetName("apachedruid.query.cache.memcached.total") + m.data.SetDescription("Cache metrics unique to memcached (only if `druid.cache.type=memcached`) as their actual values.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidQueryCacheMemcachedTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryCacheMemcachedTotal) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryCacheMemcachedTotal) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryCacheMemcachedTotal(cfg MetricConfig) metricApachedruidQueryCacheMemcachedTotal { + m := metricApachedruidQueryCacheMemcachedTotal{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryCacheTotalAverageBytes struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.cache.total.average_bytes metric with initial data. 
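+// The `total` cache metrics are modeled as gauges carrying the value Druid
+// reports at emission time, whereas the corresponding `delta` cache metrics
+// above are monotonic sums with delta aggregation temporality.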
+func (m *metricApachedruidQueryCacheTotalAverageBytes) init() { + m.data.SetName("apachedruid.query.cache.total.average_bytes") + m.data.SetDescription("Average cache entry byte size.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidQueryCacheTotalAverageBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryCacheTotalAverageBytes) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryCacheTotalAverageBytes) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryCacheTotalAverageBytes(cfg MetricConfig) metricApachedruidQueryCacheTotalAverageBytes { + m := metricApachedruidQueryCacheTotalAverageBytes{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryCacheTotalErrors struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.cache.total.errors metric with initial data. +func (m *metricApachedruidQueryCacheTotalErrors) init() { + m.data.SetName("apachedruid.query.cache.total.errors") + m.data.SetDescription("Number of cache errors.") + m.data.SetUnit("{errors}") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidQueryCacheTotalErrors) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryCacheTotalErrors) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryCacheTotalErrors) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryCacheTotalErrors(cfg MetricConfig) metricApachedruidQueryCacheTotalErrors { + m := metricApachedruidQueryCacheTotalErrors{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryCacheTotalEvictions struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.cache.total.evictions metric with initial data. 
+func (m *metricApachedruidQueryCacheTotalEvictions) init() { + m.data.SetName("apachedruid.query.cache.total.evictions") + m.data.SetDescription("Number of cache evictions.") + m.data.SetUnit("{evictions}") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidQueryCacheTotalEvictions) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryCacheTotalEvictions) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryCacheTotalEvictions) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryCacheTotalEvictions(cfg MetricConfig) metricApachedruidQueryCacheTotalEvictions { + m := metricApachedruidQueryCacheTotalEvictions{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryCacheTotalHitRate struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.cache.total.hit_rate metric with initial data. +func (m *metricApachedruidQueryCacheTotalHitRate) init() { + m.data.SetName("apachedruid.query.cache.total.hit_rate") + m.data.SetDescription("Cache hit rate.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidQueryCacheTotalHitRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryCacheTotalHitRate) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryCacheTotalHitRate) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryCacheTotalHitRate(cfg MetricConfig) metricApachedruidQueryCacheTotalHitRate { + m := metricApachedruidQueryCacheTotalHitRate{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryCacheTotalHits struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.cache.total.hits metric with initial data. 
+func (m *metricApachedruidQueryCacheTotalHits) init() { + m.data.SetName("apachedruid.query.cache.total.hits") + m.data.SetDescription("Number of cache hits.") + m.data.SetUnit("{hits}") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidQueryCacheTotalHits) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryCacheTotalHits) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryCacheTotalHits) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryCacheTotalHits(cfg MetricConfig) metricApachedruidQueryCacheTotalHits { + m := metricApachedruidQueryCacheTotalHits{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryCacheTotalMisses struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.cache.total.misses metric with initial data. +func (m *metricApachedruidQueryCacheTotalMisses) init() { + m.data.SetName("apachedruid.query.cache.total.misses") + m.data.SetDescription("Number of cache misses.") + m.data.SetUnit("{misses}") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidQueryCacheTotalMisses) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryCacheTotalMisses) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryCacheTotalMisses) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryCacheTotalMisses(cfg MetricConfig) metricApachedruidQueryCacheTotalMisses { + m := metricApachedruidQueryCacheTotalMisses{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryCacheTotalNumEntries struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.cache.total.num_entries metric with initial data. 
+func (m *metricApachedruidQueryCacheTotalNumEntries) init() { + m.data.SetName("apachedruid.query.cache.total.num_entries") + m.data.SetDescription("Number of cache entries.") + m.data.SetUnit("{entries}") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidQueryCacheTotalNumEntries) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryCacheTotalNumEntries) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryCacheTotalNumEntries) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryCacheTotalNumEntries(cfg MetricConfig) metricApachedruidQueryCacheTotalNumEntries { + m := metricApachedruidQueryCacheTotalNumEntries{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryCacheTotalPutError struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.cache.total.put.error metric with initial data. +func (m *metricApachedruidQueryCacheTotalPutError) init() { + m.data.SetName("apachedruid.query.cache.total.put.error") + m.data.SetDescription("Number of new cache entries that could not be cached due to errors.") + m.data.SetUnit("{errors}") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidQueryCacheTotalPutError) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryCacheTotalPutError) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryCacheTotalPutError) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryCacheTotalPutError(cfg MetricConfig) metricApachedruidQueryCacheTotalPutError { + m := metricApachedruidQueryCacheTotalPutError{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryCacheTotalPutOk struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.cache.total.put.ok metric with initial data. 
+func (m *metricApachedruidQueryCacheTotalPutOk) init() { + m.data.SetName("apachedruid.query.cache.total.put.ok") + m.data.SetDescription("Number of new cache entries successfully cached.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidQueryCacheTotalPutOk) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryCacheTotalPutOk) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryCacheTotalPutOk) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryCacheTotalPutOk(cfg MetricConfig) metricApachedruidQueryCacheTotalPutOk { + m := metricApachedruidQueryCacheTotalPutOk{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryCacheTotalPutOversized struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.cache.total.put.oversized metric with initial data. +func (m *metricApachedruidQueryCacheTotalPutOversized) init() { + m.data.SetName("apachedruid.query.cache.total.put.oversized") + m.data.SetDescription("Number of potential new cache entries that were skipped due to being too large (based on `druid.{broker,historical,realtime}.cache.maxEntrySize` properties).") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidQueryCacheTotalPutOversized) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryCacheTotalPutOversized) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryCacheTotalPutOversized) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryCacheTotalPutOversized(cfg MetricConfig) metricApachedruidQueryCacheTotalPutOversized { + m := metricApachedruidQueryCacheTotalPutOversized{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryCacheTotalSizeBytes struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. 
+ capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.cache.total.size_bytes metric with initial data. +func (m *metricApachedruidQueryCacheTotalSizeBytes) init() { + m.data.SetName("apachedruid.query.cache.total.size_bytes") + m.data.SetDescription("Size in bytes of cache entries.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidQueryCacheTotalSizeBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryCacheTotalSizeBytes) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryCacheTotalSizeBytes) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryCacheTotalSizeBytes(cfg MetricConfig) metricApachedruidQueryCacheTotalSizeBytes { + m := metricApachedruidQueryCacheTotalSizeBytes{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryCacheTotalTimeouts struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.cache.total.timeouts metric with initial data. +func (m *metricApachedruidQueryCacheTotalTimeouts) init() { + m.data.SetName("apachedruid.query.cache.total.timeouts") + m.data.SetDescription("Number of cache timeouts.") + m.data.SetUnit("{timeouts}") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidQueryCacheTotalTimeouts) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryCacheTotalTimeouts) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryCacheTotalTimeouts) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryCacheTotalTimeouts(cfg MetricConfig) metricApachedruidQueryCacheTotalTimeouts { + m := metricApachedruidQueryCacheTotalTimeouts{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. 
+ capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.count metric with initial data. +func (m *metricApachedruidQueryCount) init() { + m.data.SetName("apachedruid.query.count") + m.data.SetDescription("Number of total queries.") + m.data.SetUnit("{queries}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) +} + +func (m *metricApachedruidQueryCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryCount(cfg MetricConfig) metricApachedruidQueryCount { + m := metricApachedruidQueryCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryCPUTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.cpu.time metric with initial data. 
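+// Query-level metrics such as apachedruid.query.cpu.time attach the reported
+// query dimensions (data_source, type, id, interval, duration, and so on) as
+// data point attributes, with one typed recordDataPoint argument per attribute.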
+func (m *metricApachedruidQueryCPUTime) init() {
+	m.data.SetName("apachedruid.query.cpu.time")
+	m.data.SetDescription("Microseconds of CPU time taken to complete a query.")
+	m.data.SetUnit("us")
+	m.data.SetEmptyGauge()
+	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricApachedruidQueryCPUTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, queryDataSourceAttributeValue string, queryNumMetricsAttributeValue string, queryDimensionAttributeValue string, queryHasFiltersAttributeValue string, queryThresholdAttributeValue int64, queryNumComplexMetricsAttributeValue int64, queryTypeAttributeValue string, queryRemoteAddressAttributeValue string, queryIDAttributeValue string, queryContextAttributeValue string, queryNumDimensionsAttributeValue string, queryIntervalAttributeValue string, queryDurationAttributeValue string) {
+	if !m.config.Enabled {
+		return
+	}
+	dp := m.data.Gauge().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetIntValue(val)
+	dp.Attributes().PutStr("data_source", queryDataSourceAttributeValue)
+	dp.Attributes().PutStr("num_metrics", queryNumMetricsAttributeValue)
+	dp.Attributes().PutStr("dimension", queryDimensionAttributeValue)
+	dp.Attributes().PutStr("has_filters", queryHasFiltersAttributeValue)
+	dp.Attributes().PutInt("threshold", queryThresholdAttributeValue)
+	dp.Attributes().PutInt("num_complex_metrics", queryNumComplexMetricsAttributeValue)
+	dp.Attributes().PutStr("type", queryTypeAttributeValue)
+	dp.Attributes().PutStr("remote_address", queryRemoteAddressAttributeValue)
+	dp.Attributes().PutStr("id", queryIDAttributeValue)
+	dp.Attributes().PutStr("context", queryContextAttributeValue)
+	dp.Attributes().PutStr("num_dimensions", queryNumDimensionsAttributeValue)
+	dp.Attributes().PutStr("interval", queryIntervalAttributeValue)
+	dp.Attributes().PutStr("duration", queryDurationAttributeValue)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricApachedruidQueryCPUTime) updateCapacity() {
+	if m.data.Gauge().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Gauge().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricApachedruidQueryCPUTime) emit(metrics pmetric.MetricSlice) {
+	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricApachedruidQueryCPUTime(cfg MetricConfig) metricApachedruidQueryCPUTime {
+	m := metricApachedruidQueryCPUTime{config: cfg}
+	if cfg.Enabled {
+		m.data = pmetric.NewMetric()
+		m.init()
+	}
+	return m
+}
+
+type metricApachedruidQueryFailedCount struct {
+	data     pmetric.Metric // data buffer for generated metric.
+	config   MetricConfig   // metric config provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills apachedruid.query.failed.count metric with initial data.
+func (m *metricApachedruidQueryFailedCount) init() { + m.data.SetName("apachedruid.query.failed.count") + m.data.SetDescription("Number of failed queries.") + m.data.SetUnit("{queries}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) +} + +func (m *metricApachedruidQueryFailedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryFailedCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryFailedCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryFailedCount(cfg MetricConfig) metricApachedruidQueryFailedCount { + m := metricApachedruidQueryFailedCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryInterruptedCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.interrupted.count metric with initial data. +func (m *metricApachedruidQueryInterruptedCount) init() { + m.data.SetName("apachedruid.query.interrupted.count") + m.data.SetDescription("Number of queries interrupted due to cancellation.") + m.data.SetUnit("{queries}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) +} + +func (m *metricApachedruidQueryInterruptedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryInterruptedCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryInterruptedCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryInterruptedCount(cfg MetricConfig) metricApachedruidQueryInterruptedCount { + m := metricApachedruidQueryInterruptedCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryNodeBackpressure struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. 
+} + +// init fills apachedruid.query.node.backpressure metric with initial data. +func (m *metricApachedruidQueryNodeBackpressure) init() { + m.data.SetName("apachedruid.query.node.backpressure") + m.data.SetDescription("Milliseconds that the channel to this process has spent suspended due to backpressure.") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidQueryNodeBackpressure) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, queryStatusAttributeValue string, queryServerAttributeValue string, queryIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("status", queryStatusAttributeValue) + dp.Attributes().PutStr("server", queryServerAttributeValue) + dp.Attributes().PutStr("id", queryIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryNodeBackpressure) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryNodeBackpressure) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryNodeBackpressure(cfg MetricConfig) metricApachedruidQueryNodeBackpressure { + m := metricApachedruidQueryNodeBackpressure{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryNodeBytes struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.node.bytes metric with initial data. +func (m *metricApachedruidQueryNodeBytes) init() { + m.data.SetName("apachedruid.query.node.bytes") + m.data.SetDescription("Number of bytes returned from querying individual historical/realtime processes.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidQueryNodeBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, queryStatusAttributeValue string, queryServerAttributeValue string, queryIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("status", queryStatusAttributeValue) + dp.Attributes().PutStr("server", queryServerAttributeValue) + dp.Attributes().PutStr("id", queryIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryNodeBytes) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricApachedruidQueryNodeBytes) emit(metrics pmetric.MetricSlice) {
+	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricApachedruidQueryNodeBytes(cfg MetricConfig) metricApachedruidQueryNodeBytes {
+	m := metricApachedruidQueryNodeBytes{config: cfg}
+	if cfg.Enabled {
+		m.data = pmetric.NewMetric()
+		m.init()
+	}
+	return m
+}
+
+type metricApachedruidQueryNodeTime struct {
+	data     pmetric.Metric // data buffer for generated metric.
+	config   MetricConfig   // metric config provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills apachedruid.query.node.time metric with initial data.
+func (m *metricApachedruidQueryNodeTime) init() {
+	m.data.SetName("apachedruid.query.node.time")
+	m.data.SetDescription("Milliseconds taken to query individual historical/realtime processes.")
+	m.data.SetUnit("ms")
+	m.data.SetEmptyGauge()
+	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricApachedruidQueryNodeTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, queryStatusAttributeValue string, queryServerAttributeValue string, queryIDAttributeValue string) {
+	if !m.config.Enabled {
+		return
+	}
+	dp := m.data.Gauge().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetIntValue(val)
+	dp.Attributes().PutStr("status", queryStatusAttributeValue)
+	dp.Attributes().PutStr("server", queryServerAttributeValue)
+	dp.Attributes().PutStr("id", queryIDAttributeValue)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricApachedruidQueryNodeTime) updateCapacity() {
+	if m.data.Gauge().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Gauge().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricApachedruidQueryNodeTime) emit(metrics pmetric.MetricSlice) {
+	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricApachedruidQueryNodeTime(cfg MetricConfig) metricApachedruidQueryNodeTime {
+	m := metricApachedruidQueryNodeTime{config: cfg}
+	if cfg.Enabled {
+		m.data = pmetric.NewMetric()
+		m.init()
+	}
+	return m
+}
+
+type metricApachedruidQueryNodeTtfb struct {
+	data     pmetric.Metric // data buffer for generated metric.
+	config   MetricConfig   // metric config provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills apachedruid.query.node.ttfb metric with initial data.
+func (m *metricApachedruidQueryNodeTtfb) init() {
+	m.data.SetName("apachedruid.query.node.ttfb")
+	m.data.SetDescription("Time to first byte. Milliseconds elapsed until Broker starts receiving the response from individual historical/realtime processes.")
+	m.data.SetUnit("ms")
+	m.data.SetEmptyGauge()
+	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricApachedruidQueryNodeTtfb) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, queryStatusAttributeValue string, queryServerAttributeValue string, queryIDAttributeValue string) {
+	if !m.config.Enabled {
+		return
+	}
+	dp := m.data.Gauge().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetIntValue(val)
+	dp.Attributes().PutStr("status", queryStatusAttributeValue)
+	dp.Attributes().PutStr("server", queryServerAttributeValue)
+	dp.Attributes().PutStr("id", queryIDAttributeValue)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricApachedruidQueryNodeTtfb) updateCapacity() {
+	if m.data.Gauge().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Gauge().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricApachedruidQueryNodeTtfb) emit(metrics pmetric.MetricSlice) {
+	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricApachedruidQueryNodeTtfb(cfg MetricConfig) metricApachedruidQueryNodeTtfb {
+	m := metricApachedruidQueryNodeTtfb{config: cfg}
+	if cfg.Enabled {
+		m.data = pmetric.NewMetric()
+		m.init()
+	}
+	return m
+}
+
+type metricApachedruidQueryPriority struct {
+	data     pmetric.Metric // data buffer for generated metric.
+	config   MetricConfig   // metric config provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills apachedruid.query.priority metric with initial data.
+func (m *metricApachedruidQueryPriority) init() {
+	m.data.SetName("apachedruid.query.priority")
+	m.data.SetDescription("Assigned lane and priority, only if Laning strategy is enabled. Refer to [Laning strategies](https://druid.apache.org/docs/latest/configuration#laning-strategies).")
+	m.data.SetUnit("1")
+	m.data.SetEmptyGauge()
+	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricApachedruidQueryPriority) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, queryTypeAttributeValue string, queryDataSourceAttributeValue string, queryLaneAttributeValue string) {
+	if !m.config.Enabled {
+		return
+	}
+	dp := m.data.Gauge().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetIntValue(val)
+	dp.Attributes().PutStr("type", queryTypeAttributeValue)
+	dp.Attributes().PutStr("data_source", queryDataSourceAttributeValue)
+	dp.Attributes().PutStr("lane", queryLaneAttributeValue)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricApachedruidQueryPriority) updateCapacity() {
+	if m.data.Gauge().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Gauge().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricApachedruidQueryPriority) emit(metrics pmetric.MetricSlice) {
+	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricApachedruidQueryPriority(cfg MetricConfig) metricApachedruidQueryPriority {
+	m := metricApachedruidQueryPriority{config: cfg}
+	if cfg.Enabled {
+		m.data = pmetric.NewMetric()
+		m.init()
+	}
+	return m
+}
+
+type metricApachedruidQueryRowLimitExceededCount struct {
+	data     pmetric.Metric // data buffer for generated metric.
+	config   MetricConfig   // metric config provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills apachedruid.query.row_limit.exceeded.count metric with initial data.
+func (m *metricApachedruidQueryRowLimitExceededCount) init() {
+	m.data.SetName("apachedruid.query.row_limit.exceeded.count")
+	m.data.SetDescription("Number of queries whose inlined subquery results exceeded the given row limit.")
+	m.data.SetUnit("{queries}")
+	m.data.SetEmptySum()
+	m.data.Sum().SetIsMonotonic(true)
+	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta)
+}
+
+func (m *metricApachedruidQueryRowLimitExceededCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+	if !m.config.Enabled {
+		return
+	}
+	dp := m.data.Sum().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetIntValue(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricApachedruidQueryRowLimitExceededCount) updateCapacity() {
+	if m.data.Sum().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Sum().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricApachedruidQueryRowLimitExceededCount) emit(metrics pmetric.MetricSlice) {
+	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricApachedruidQueryRowLimitExceededCount(cfg MetricConfig) metricApachedruidQueryRowLimitExceededCount {
+	m := metricApachedruidQueryRowLimitExceededCount{config: cfg}
+	if cfg.Enabled {
+		m.data = pmetric.NewMetric()
+		m.init()
+	}
+	return m
+}
+
+type metricApachedruidQuerySegmentTime struct {
+	data     pmetric.Metric // data buffer for generated metric.
+	config   MetricConfig   // metric config provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills apachedruid.query.segment.time metric with initial data.
+func (m *metricApachedruidQuerySegmentTime) init() {
+	m.data.SetName("apachedruid.query.segment.time")
+	m.data.SetDescription("Milliseconds taken to query individual segment. Includes time to page in the segment from disk.")
+	m.data.SetUnit("ms")
+	m.data.SetEmptyGauge()
+	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricApachedruidQuerySegmentTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, queryStatusAttributeValue string, querySegmentAttributeValue string, queryIDAttributeValue string, queryVectorizedAttributeValue string) {
+	if !m.config.Enabled {
+		return
+	}
+	dp := m.data.Gauge().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetIntValue(val)
+	dp.Attributes().PutStr("status", queryStatusAttributeValue)
+	dp.Attributes().PutStr("segment", querySegmentAttributeValue)
+	dp.Attributes().PutStr("id", queryIDAttributeValue)
+	dp.Attributes().PutStr("vectorized", queryVectorizedAttributeValue)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricApachedruidQuerySegmentTime) updateCapacity() {
+	if m.data.Gauge().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Gauge().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricApachedruidQuerySegmentTime) emit(metrics pmetric.MetricSlice) {
+	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricApachedruidQuerySegmentTime(cfg MetricConfig) metricApachedruidQuerySegmentTime {
+	m := metricApachedruidQuerySegmentTime{config: cfg}
+	if cfg.Enabled {
+		m.data = pmetric.NewMetric()
+		m.init()
+	}
+	return m
+}
+
+type metricApachedruidQuerySegmentAndCacheTime struct {
+	data     pmetric.Metric // data buffer for generated metric.
+	config   MetricConfig   // metric config provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills apachedruid.query.segment_and_cache.time metric with initial data.
+func (m *metricApachedruidQuerySegmentAndCacheTime) init() {
+	m.data.SetName("apachedruid.query.segment_and_cache.time")
+	m.data.SetDescription("Milliseconds taken to query individual segment or hit the cache (if it is enabled on the Historical process).")
+	m.data.SetUnit("ms")
+	m.data.SetEmptyGauge()
+	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricApachedruidQuerySegmentAndCacheTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, querySegmentAttributeValue string, queryIDAttributeValue string) {
+	if !m.config.Enabled {
+		return
+	}
+	dp := m.data.Gauge().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetIntValue(val)
+	dp.Attributes().PutStr("segment", querySegmentAttributeValue)
+	dp.Attributes().PutStr("id", queryIDAttributeValue)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricApachedruidQuerySegmentAndCacheTime) updateCapacity() {
+	if m.data.Gauge().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Gauge().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricApachedruidQuerySegmentAndCacheTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQuerySegmentAndCacheTime(cfg MetricConfig) metricApachedruidQuerySegmentAndCacheTime { + m := metricApachedruidQuerySegmentAndCacheTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQuerySegmentsCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.segments.count metric with initial data. +func (m *metricApachedruidQuerySegmentsCount) init() { + m.data.SetName("apachedruid.query.segments.count") + m.data.SetDescription("This metric is not enabled by default. See the `QueryMetrics` Interface for reference regarding enabling this metric. Number of segments that will be touched by the query. In the broker, it makes a plan to distribute the query to realtime tasks and historicals based on a snapshot of segment distribution state. If there are some segments moved after this snapshot is created, certain historicals and realtime tasks can report those segments as missing to the broker. The broker will resend the query to the new servers that serve those segments after move. In this case, those segments can be counted more than once in this metric.") + m.data.SetUnit("{segments}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) +} + +func (m *metricApachedruidQuerySegmentsCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQuerySegmentsCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQuerySegmentsCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQuerySegmentsCount(cfg MetricConfig) metricApachedruidQuerySegmentsCount { + m := metricApachedruidQuerySegmentsCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQuerySuccessCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.success.count metric with initial data. 
+func (m *metricApachedruidQuerySuccessCount) init() { + m.data.SetName("apachedruid.query.success.count") + m.data.SetDescription("Number of queries successfully processed.") + m.data.SetUnit("{queries}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) +} + +func (m *metricApachedruidQuerySuccessCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQuerySuccessCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQuerySuccessCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQuerySuccessCount(cfg MetricConfig) metricApachedruidQuerySuccessCount { + m := metricApachedruidQuerySuccessCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.time metric with initial data. 
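// Editorial sketch, not part of the generated file: it shows how a scraper could drive
// the metricApachedruidQuerySuccessCount builder defined above (a delta, monotonic sum
// with no attributes). The function name, timestamps, and the value 3 are hypothetical.
func exampleRecordQuerySuccessCount(cfg MetricConfig, start, now pcommon.Timestamp, dest pmetric.MetricSlice) {
	m := newMetricApachedruidQuerySuccessCount(cfg) // recording is a no-op if cfg.Enabled is false
	m.recordDataPoint(start, now, 3)                // e.g. 3 queries succeeded in this collection interval
	m.emit(dest)                                    // moves the buffered metric into dest and re-inits the builder
}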
+func (m *metricApachedruidQueryTime) init() { + m.data.SetName("apachedruid.query.time") + m.data.SetDescription("Milliseconds taken to complete a query.") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidQueryTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, queryDataSourceAttributeValue string, queryNumMetricsAttributeValue string, queryDimensionAttributeValue string, queryHasFiltersAttributeValue string, queryThresholdAttributeValue int64, queryNumComplexMetricsAttributeValue int64, queryTypeAttributeValue string, queryRemoteAddressAttributeValue string, queryIDAttributeValue string, queryContextAttributeValue string, queryNumDimensionsAttributeValue string, queryIntervalAttributeValue string, queryDurationAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("data_source", queryDataSourceAttributeValue) + dp.Attributes().PutStr("num_metrics", queryNumMetricsAttributeValue) + dp.Attributes().PutStr("dimension", queryDimensionAttributeValue) + dp.Attributes().PutStr("has_filters", queryHasFiltersAttributeValue) + dp.Attributes().PutInt("threshold", queryThresholdAttributeValue) + dp.Attributes().PutInt("num_complex_metrics", queryNumComplexMetricsAttributeValue) + dp.Attributes().PutStr("type", queryTypeAttributeValue) + dp.Attributes().PutStr("remote_address", queryRemoteAddressAttributeValue) + dp.Attributes().PutStr("id", queryIDAttributeValue) + dp.Attributes().PutStr("context", queryContextAttributeValue) + dp.Attributes().PutStr("num_dimensions", queryNumDimensionsAttributeValue) + dp.Attributes().PutStr("interval", queryIntervalAttributeValue) + dp.Attributes().PutStr("duration", queryDurationAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryTime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryTime(cfg MetricConfig) metricApachedruidQueryTime { + m := metricApachedruidQueryTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryTimeoutCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.timeout.count metric with initial data. 
+func (m *metricApachedruidQueryTimeoutCount) init() { + m.data.SetName("apachedruid.query.timeout.count") + m.data.SetDescription("Number of timed out queries.") + m.data.SetUnit("{queries}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) +} + +func (m *metricApachedruidQueryTimeoutCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryTimeoutCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryTimeoutCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryTimeoutCount(cfg MetricConfig) metricApachedruidQueryTimeoutCount { + m := metricApachedruidQueryTimeoutCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidQueryWaitTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.query.wait.time metric with initial data. +func (m *metricApachedruidQueryWaitTime) init() { + m.data.SetName("apachedruid.query.wait.time") + m.data.SetDescription("Milliseconds spent waiting for a segment to be scanned.") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidQueryWaitTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, querySegmentAttributeValue string, queryIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("segment", querySegmentAttributeValue) + dp.Attributes().PutStr("id", queryIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidQueryWaitTime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidQueryWaitTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidQueryWaitTime(cfg MetricConfig) metricApachedruidQueryWaitTime { + m := metricApachedruidQueryWaitTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentAddedBytes struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. 
+ capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.added.bytes metric with initial data. +func (m *metricApachedruidSegmentAddedBytes) init() { + m.data.SetName("apachedruid.segment.added.bytes") + m.data.SetDescription("Size in bytes of new segments created.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSegmentAddedBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentTaskTypeAttributeValue string, segmentDataSourceAttributeValue string, segmentGroupIDAttributeValue string, segmentTagsAttributeValue string, segmentTaskIDAttributeValue string, segmentIntervalAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", segmentTaskTypeAttributeValue) + dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) + dp.Attributes().PutStr("group_id", segmentGroupIDAttributeValue) + dp.Attributes().PutStr("tags", segmentTagsAttributeValue) + dp.Attributes().PutStr("task_id", segmentTaskIDAttributeValue) + dp.Attributes().PutStr("interval", segmentIntervalAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentAddedBytes) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSegmentAddedBytes) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentAddedBytes(cfg MetricConfig) metricApachedruidSegmentAddedBytes { + m := metricApachedruidSegmentAddedBytes{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentAssignSkippedCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.assign_skipped.count metric with initial data. +func (m *metricApachedruidSegmentAssignSkippedCount) init() { + m.data.SetName("apachedruid.segment.assign_skipped.count") + m.data.SetDescription("Number of segments that could not be assigned to any server for loading. 
This can occur due to replication throttling, no available disk space, or a full load queue.") + m.data.SetUnit("{segments}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSegmentAssignSkippedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentDescriptionAttributeValue string, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("description", segmentDescriptionAttributeValue) + dp.Attributes().PutStr("tier", segmentTierAttributeValue) + dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentAssignSkippedCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSegmentAssignSkippedCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentAssignSkippedCount(cfg MetricConfig) metricApachedruidSegmentAssignSkippedCount { + m := metricApachedruidSegmentAssignSkippedCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentAssignedCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.assigned.count metric with initial data. +func (m *metricApachedruidSegmentAssignedCount) init() { + m.data.SetName("apachedruid.segment.assigned.count") + m.data.SetDescription("Number of segments assigned to be loaded in the cluster.") + m.data.SetUnit("{segments}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSegmentAssignedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("tier", segmentTierAttributeValue) + dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentAssignedCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricApachedruidSegmentAssignedCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentAssignedCount(cfg MetricConfig) metricApachedruidSegmentAssignedCount { + m := metricApachedruidSegmentAssignedCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentCompactedBytes struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.compacted.bytes metric with initial data. +func (m *metricApachedruidSegmentCompactedBytes) init() { + m.data.SetName("apachedruid.segment.compacted.bytes") + m.data.SetDescription("Total bytes of this datasource that are already compacted with the spec set in the auto compaction config.") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSegmentCompactedBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentCompactedBytes) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSegmentCompactedBytes) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentCompactedBytes(cfg MetricConfig) metricApachedruidSegmentCompactedBytes { + m := metricApachedruidSegmentCompactedBytes{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentCompactedCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.compacted.count metric with initial data. 
+func (m *metricApachedruidSegmentCompactedCount) init() { + m.data.SetName("apachedruid.segment.compacted.count") + m.data.SetDescription("Total number of segments of this datasource that are already compacted with the spec set in the auto compaction config.") + m.data.SetUnit("{segments}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSegmentCompactedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentCompactedCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSegmentCompactedCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentCompactedCount(cfg MetricConfig) metricApachedruidSegmentCompactedCount { + m := metricApachedruidSegmentCompactedCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.count metric with initial data. +func (m *metricApachedruidSegmentCount) init() { + m.data.SetName("apachedruid.segment.count") + m.data.SetDescription("Number of used segments belonging to a data source. Emitted only for data sources to which at least one used segment belongs.") + m.data.SetUnit("{segments}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSegmentCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentPriorityAttributeValue string, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("priority", segmentPriorityAttributeValue) + dp.Attributes().PutStr("tier", segmentTierAttributeValue) + dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricApachedruidSegmentCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentCount(cfg MetricConfig) metricApachedruidSegmentCount { + m := metricApachedruidSegmentCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentDeletedCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.deleted.count metric with initial data. +func (m *metricApachedruidSegmentDeletedCount) init() { + m.data.SetName("apachedruid.segment.deleted.count") + m.data.SetDescription("Number of segments marked as unused due to drop rules.") + m.data.SetUnit("{segments}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSegmentDeletedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentDeletedCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSegmentDeletedCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentDeletedCount(cfg MetricConfig) metricApachedruidSegmentDeletedCount { + m := metricApachedruidSegmentDeletedCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentDropQueueCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.drop_queue.count metric with initial data. +func (m *metricApachedruidSegmentDropQueueCount) init() { + m.data.SetName("apachedruid.segment.drop_queue.count") + m.data.SetDescription("Number of segments to drop.") + m.data.SetUnit("{segments}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSegmentDropQueueCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentServerAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("server", segmentServerAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
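// Editorial sketch, not part of the generated file: recording an attributed data point
// with the metricApachedruidSegmentDeletedCount builder above. The data source name
// "wikipedia" and the value 7 are hypothetical placeholders.
func exampleRecordSegmentDeletedCount(cfg MetricConfig, start, now pcommon.Timestamp, dest pmetric.MetricSlice) {
	m := newMetricApachedruidSegmentDeletedCount(cfg)
	m.recordDataPoint(start, now, 7, "wikipedia") // 7 segments of the "wikipedia" datasource marked unused
	m.emit(dest)                                  // emit only appends a metric when at least one point was recorded
}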
+func (m *metricApachedruidSegmentDropQueueCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSegmentDropQueueCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentDropQueueCount(cfg MetricConfig) metricApachedruidSegmentDropQueueCount { + m := metricApachedruidSegmentDropQueueCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentDropSkippedCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.drop_skipped.count metric with initial data. +func (m *metricApachedruidSegmentDropSkippedCount) init() { + m.data.SetName("apachedruid.segment.drop_skipped.count") + m.data.SetDescription("Number of segments that could not be dropped from any server.") + m.data.SetUnit("{segments}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSegmentDropSkippedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentDescriptionAttributeValue string, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("description", segmentDescriptionAttributeValue) + dp.Attributes().PutStr("tier", segmentTierAttributeValue) + dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentDropSkippedCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSegmentDropSkippedCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentDropSkippedCount(cfg MetricConfig) metricApachedruidSegmentDropSkippedCount { + m := metricApachedruidSegmentDropSkippedCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentDroppedCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.dropped.count metric with initial data. 
+func (m *metricApachedruidSegmentDroppedCount) init() { + m.data.SetName("apachedruid.segment.dropped.count") + m.data.SetDescription("Number of segments chosen to be dropped from the cluster due to being over-replicated.") + m.data.SetUnit("{segments}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSegmentDroppedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("tier", segmentTierAttributeValue) + dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentDroppedCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSegmentDroppedCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentDroppedCount(cfg MetricConfig) metricApachedruidSegmentDroppedCount { + m := metricApachedruidSegmentDroppedCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentLoadQueueAssigned struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.load_queue.assigned metric with initial data. +func (m *metricApachedruidSegmentLoadQueueAssigned) init() { + m.data.SetName("apachedruid.segment.load_queue.assigned") + m.data.SetDescription("Number of segments assigned for load or drop to the load queue of a server.") + m.data.SetUnit("{segments}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSegmentLoadQueueAssigned) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentServerAttributeValue string, segmentDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("server", segmentServerAttributeValue) + dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentLoadQueueAssigned) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricApachedruidSegmentLoadQueueAssigned) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentLoadQueueAssigned(cfg MetricConfig) metricApachedruidSegmentLoadQueueAssigned { + m := metricApachedruidSegmentLoadQueueAssigned{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentLoadQueueCancelled struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.load_queue.cancelled metric with initial data. +func (m *metricApachedruidSegmentLoadQueueCancelled) init() { + m.data.SetName("apachedruid.segment.load_queue.cancelled") + m.data.SetDescription("Number of segment assignments that were canceled before completion.") + m.data.SetUnit("1") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSegmentLoadQueueCancelled) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentServerAttributeValue string, segmentDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("server", segmentServerAttributeValue) + dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentLoadQueueCancelled) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSegmentLoadQueueCancelled) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentLoadQueueCancelled(cfg MetricConfig) metricApachedruidSegmentLoadQueueCancelled { + m := metricApachedruidSegmentLoadQueueCancelled{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentLoadQueueCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.load_queue.count metric with initial data. 
+func (m *metricApachedruidSegmentLoadQueueCount) init() { + m.data.SetName("apachedruid.segment.load_queue.count") + m.data.SetDescription("Number of segments to load.") + m.data.SetUnit("{segments}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSegmentLoadQueueCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentServerAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("server", segmentServerAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentLoadQueueCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSegmentLoadQueueCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentLoadQueueCount(cfg MetricConfig) metricApachedruidSegmentLoadQueueCount { + m := metricApachedruidSegmentLoadQueueCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentLoadQueueFailed struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.load_queue.failed metric with initial data. +func (m *metricApachedruidSegmentLoadQueueFailed) init() { + m.data.SetName("apachedruid.segment.load_queue.failed") + m.data.SetDescription("Number of segment assignments that failed to complete.") + m.data.SetUnit("1") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSegmentLoadQueueFailed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentServerAttributeValue string, segmentDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("server", segmentServerAttributeValue) + dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentLoadQueueFailed) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricApachedruidSegmentLoadQueueFailed) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentLoadQueueFailed(cfg MetricConfig) metricApachedruidSegmentLoadQueueFailed { + m := metricApachedruidSegmentLoadQueueFailed{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentLoadQueueSize struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.load_queue.size metric with initial data. +func (m *metricApachedruidSegmentLoadQueueSize) init() { + m.data.SetName("apachedruid.segment.load_queue.size") + m.data.SetDescription("Size in bytes of segments to load.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSegmentLoadQueueSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentServerAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("server", segmentServerAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentLoadQueueSize) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSegmentLoadQueueSize) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentLoadQueueSize(cfg MetricConfig) metricApachedruidSegmentLoadQueueSize { + m := metricApachedruidSegmentLoadQueueSize{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentLoadQueueSuccess struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.load_queue.success metric with initial data. 
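// Editorial sketch, not part of the generated file: the load-queue gauges above carry a
// per-server attribute, so a scraper would typically record one point per server. The
// map of queue sizes keyed by server address is a hypothetical snapshot.
func exampleRecordLoadQueueSizes(cfg MetricConfig, start, now pcommon.Timestamp, sizes map[string]int64, dest pmetric.MetricSlice) {
	m := newMetricApachedruidSegmentLoadQueueSize(cfg)
	for server, queuedBytes := range sizes {
		m.recordDataPoint(start, now, queuedBytes, server) // one gauge point per server, labeled with "server"
	}
	m.emit(dest)
}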
+func (m *metricApachedruidSegmentLoadQueueSuccess) init() { + m.data.SetName("apachedruid.segment.load_queue.success") + m.data.SetDescription("Number of segment assignments that completed successfully.") + m.data.SetUnit("1") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSegmentLoadQueueSuccess) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentServerAttributeValue string, segmentDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("server", segmentServerAttributeValue) + dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentLoadQueueSuccess) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSegmentLoadQueueSuccess) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentLoadQueueSuccess(cfg MetricConfig) metricApachedruidSegmentLoadQueueSuccess { + m := metricApachedruidSegmentLoadQueueSuccess{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentMax struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.max metric with initial data. +func (m *metricApachedruidSegmentMax) init() { + m.data.SetName("apachedruid.segment.max") + m.data.SetDescription("Maximum byte limit available for segments.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidSegmentMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentMax) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSegmentMax) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentMax(cfg MetricConfig) metricApachedruidSegmentMax { + m := metricApachedruidSegmentMax{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentMoveSkippedCount struct { + data pmetric.Metric // data buffer for generated metric. 
+ config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.move_skipped.count metric with initial data. +func (m *metricApachedruidSegmentMoveSkippedCount) init() { + m.data.SetName("apachedruid.segment.move_skipped.count") + m.data.SetDescription("Number of segments that were chosen for balancing but could not be moved. This can occur when segments are already optimally placed.") + m.data.SetUnit("{segments}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSegmentMoveSkippedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentDescriptionAttributeValue string, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("description", segmentDescriptionAttributeValue) + dp.Attributes().PutStr("tier", segmentTierAttributeValue) + dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentMoveSkippedCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSegmentMoveSkippedCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentMoveSkippedCount(cfg MetricConfig) metricApachedruidSegmentMoveSkippedCount { + m := metricApachedruidSegmentMoveSkippedCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentMovedBytes struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.moved.bytes metric with initial data. 
+func (m *metricApachedruidSegmentMovedBytes) init() { + m.data.SetName("apachedruid.segment.moved.bytes") + m.data.SetDescription("Size in bytes of segments moved/archived via the Move Task.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSegmentMovedBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentTaskTypeAttributeValue string, segmentDataSourceAttributeValue string, segmentGroupIDAttributeValue string, segmentTagsAttributeValue string, segmentTaskIDAttributeValue string, segmentIntervalAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", segmentTaskTypeAttributeValue) + dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) + dp.Attributes().PutStr("group_id", segmentGroupIDAttributeValue) + dp.Attributes().PutStr("tags", segmentTagsAttributeValue) + dp.Attributes().PutStr("task_id", segmentTaskIDAttributeValue) + dp.Attributes().PutStr("interval", segmentIntervalAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentMovedBytes) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSegmentMovedBytes) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentMovedBytes(cfg MetricConfig) metricApachedruidSegmentMovedBytes { + m := metricApachedruidSegmentMovedBytes{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentMovedCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.moved.count metric with initial data. +func (m *metricApachedruidSegmentMovedCount) init() { + m.data.SetName("apachedruid.segment.moved.count") + m.data.SetDescription("Number of segments moved in the cluster.") + m.data.SetUnit("{segments}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSegmentMovedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("tier", segmentTierAttributeValue) + dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricApachedruidSegmentMovedCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSegmentMovedCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentMovedCount(cfg MetricConfig) metricApachedruidSegmentMovedCount { + m := metricApachedruidSegmentMovedCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentNukedBytes struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.nuked.bytes metric with initial data. +func (m *metricApachedruidSegmentNukedBytes) init() { + m.data.SetName("apachedruid.segment.nuked.bytes") + m.data.SetDescription("Size in bytes of segments deleted via the Kill Task.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSegmentNukedBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentTaskTypeAttributeValue string, segmentDataSourceAttributeValue string, segmentGroupIDAttributeValue string, segmentTagsAttributeValue string, segmentTaskIDAttributeValue string, segmentIntervalAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", segmentTaskTypeAttributeValue) + dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) + dp.Attributes().PutStr("group_id", segmentGroupIDAttributeValue) + dp.Attributes().PutStr("tags", segmentTagsAttributeValue) + dp.Attributes().PutStr("task_id", segmentTaskIDAttributeValue) + dp.Attributes().PutStr("interval", segmentIntervalAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentNukedBytes) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSegmentNukedBytes) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentNukedBytes(cfg MetricConfig) metricApachedruidSegmentNukedBytes { + m := metricApachedruidSegmentNukedBytes{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentOverShadowedCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.over_shadowed.count metric with initial data. 
+func (m *metricApachedruidSegmentOverShadowedCount) init() { + m.data.SetName("apachedruid.segment.over_shadowed.count") + m.data.SetDescription("Number of segments marked as unused due to being overshadowed.") + m.data.SetUnit("{segments}") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidSegmentOverShadowedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentOverShadowedCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSegmentOverShadowedCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentOverShadowedCount(cfg MetricConfig) metricApachedruidSegmentOverShadowedCount { + m := metricApachedruidSegmentOverShadowedCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentPendingDelete struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.pending_delete metric with initial data. +func (m *metricApachedruidSegmentPendingDelete) init() { + m.data.SetName("apachedruid.segment.pending_delete") + m.data.SetDescription("On-disk size in bytes of segments that are waiting to be cleared out.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidSegmentPendingDelete) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentPendingDelete) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSegmentPendingDelete) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentPendingDelete(cfg MetricConfig) metricApachedruidSegmentPendingDelete { + m := metricApachedruidSegmentPendingDelete{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentRowCountAvg struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.row_count.avg metric with initial data. 
+func (m *metricApachedruidSegmentRowCountAvg) init() { + m.data.SetName("apachedruid.segment.row_count.avg") + m.data.SetDescription("The average number of rows per segment on a historical. `SegmentStatsMonitor` must be enabled.") + m.data.SetUnit("{rows}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSegmentRowCountAvg) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentPriorityAttributeValue string, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("priority", segmentPriorityAttributeValue) + dp.Attributes().PutStr("tier", segmentTierAttributeValue) + dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentRowCountAvg) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSegmentRowCountAvg) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentRowCountAvg(cfg MetricConfig) metricApachedruidSegmentRowCountAvg { + m := metricApachedruidSegmentRowCountAvg{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentRowCountRangeCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.row_count.range.count metric with initial data. +func (m *metricApachedruidSegmentRowCountRangeCount) init() { + m.data.SetName("apachedruid.segment.row_count.range.count") + m.data.SetDescription("The number of segments in a bucket. `SegmentStatsMonitor` must be enabled.") + m.data.SetUnit("{segments}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSegmentRowCountRangeCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentPriorityAttributeValue string, segmentTierAttributeValue string, segmentDataSourceAttributeValue string, segmentRangeAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("priority", segmentPriorityAttributeValue) + dp.Attributes().PutStr("tier", segmentTierAttributeValue) + dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) + dp.Attributes().PutStr("range", segmentRangeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricApachedruidSegmentRowCountRangeCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSegmentRowCountRangeCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentRowCountRangeCount(cfg MetricConfig) metricApachedruidSegmentRowCountRangeCount { + m := metricApachedruidSegmentRowCountRangeCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentScanActive struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.scan.active metric with initial data. +func (m *metricApachedruidSegmentScanActive) init() { + m.data.SetName("apachedruid.segment.scan.active") + m.data.SetDescription("Number of segments currently scanned. This metric also indicates how many threads from `druid.processing.numThreads` are currently being used.") + m.data.SetUnit("{segments}") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidSegmentScanActive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentScanActive) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSegmentScanActive) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentScanActive(cfg MetricConfig) metricApachedruidSegmentScanActive { + m := metricApachedruidSegmentScanActive{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentScanPending struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.scan.pending metric with initial data. +func (m *metricApachedruidSegmentScanPending) init() { + m.data.SetName("apachedruid.segment.scan.pending") + m.data.SetDescription("Number of segments in queue waiting to be scanned.") + m.data.SetUnit("{segments}") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidSegmentScanPending) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricApachedruidSegmentScanPending) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSegmentScanPending) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentScanPending(cfg MetricConfig) metricApachedruidSegmentScanPending { + m := metricApachedruidSegmentScanPending{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentSize struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.size metric with initial data. +func (m *metricApachedruidSegmentSize) init() { + m.data.SetName("apachedruid.segment.size") + m.data.SetDescription("Total size of used segments in a data source. Emitted only for data sources to which at least one used segment belongs.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSegmentSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentSize) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSegmentSize) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentSize(cfg MetricConfig) metricApachedruidSegmentSize { + m := metricApachedruidSegmentSize{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentSkipCompactBytes struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.skip_compact.bytes metric with initial data. 
+func (m *metricApachedruidSegmentSkipCompactBytes) init() { + m.data.SetName("apachedruid.segment.skip_compact.bytes") + m.data.SetDescription("Total bytes of this datasource that are skipped (not eligible for auto compaction) by the auto compaction.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSegmentSkipCompactBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentSkipCompactBytes) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSegmentSkipCompactBytes) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentSkipCompactBytes(cfg MetricConfig) metricApachedruidSegmentSkipCompactBytes { + m := metricApachedruidSegmentSkipCompactBytes{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentSkipCompactCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.skip_compact.count metric with initial data. +func (m *metricApachedruidSegmentSkipCompactCount) init() { + m.data.SetName("apachedruid.segment.skip_compact.count") + m.data.SetDescription("Total number of segments of this datasource that are skipped (not eligible for auto compaction) by the auto compaction.") + m.data.SetUnit("{segments}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSegmentSkipCompactCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentSkipCompactCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricApachedruidSegmentSkipCompactCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentSkipCompactCount(cfg MetricConfig) metricApachedruidSegmentSkipCompactCount { + m := metricApachedruidSegmentSkipCompactCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentUnavailableCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.unavailable.count metric with initial data. +func (m *metricApachedruidSegmentUnavailableCount) init() { + m.data.SetName("apachedruid.segment.unavailable.count") + m.data.SetDescription("Number of unique segments left to load until all used segments are available for queries.") + m.data.SetUnit("{segments}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSegmentUnavailableCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentUnavailableCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSegmentUnavailableCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentUnavailableCount(cfg MetricConfig) metricApachedruidSegmentUnavailableCount { + m := metricApachedruidSegmentUnavailableCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentUnderReplicatedCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.under_replicated.count metric with initial data. 
+func (m *metricApachedruidSegmentUnderReplicatedCount) init() { + m.data.SetName("apachedruid.segment.under_replicated.count") + m.data.SetDescription("Number of segments, including replicas, left to load until all used segments are available for queries.") + m.data.SetUnit("{segments}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSegmentUnderReplicatedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("tier", segmentTierAttributeValue) + dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentUnderReplicatedCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSegmentUnderReplicatedCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentUnderReplicatedCount(cfg MetricConfig) metricApachedruidSegmentUnderReplicatedCount { + m := metricApachedruidSegmentUnderReplicatedCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentUnneededCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.unneeded.count metric with initial data. +func (m *metricApachedruidSegmentUnneededCount) init() { + m.data.SetName("apachedruid.segment.unneeded.count") + m.data.SetDescription("Number of segments dropped due to being marked as unused.") + m.data.SetUnit("{segments}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSegmentUnneededCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("tier", segmentTierAttributeValue) + dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentUnneededCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricApachedruidSegmentUnneededCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentUnneededCount(cfg MetricConfig) metricApachedruidSegmentUnneededCount { + m := metricApachedruidSegmentUnneededCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentUsed struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.used metric with initial data. +func (m *metricApachedruidSegmentUsed) init() { + m.data.SetName("apachedruid.segment.used") + m.data.SetDescription("Bytes used for served segments.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSegmentUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentPriorityAttributeValue string, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("priority", segmentPriorityAttributeValue) + dp.Attributes().PutStr("tier", segmentTierAttributeValue) + dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentUsed) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSegmentUsed) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentUsed(cfg MetricConfig) metricApachedruidSegmentUsed { + m := metricApachedruidSegmentUsed{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentUsedPercent struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.used_percent metric with initial data. 
+func (m *metricApachedruidSegmentUsedPercent) init() { + m.data.SetName("apachedruid.segment.used_percent") + m.data.SetDescription("Percentage of space used by served segments.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSegmentUsedPercent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, segmentPriorityAttributeValue string, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("priority", segmentPriorityAttributeValue) + dp.Attributes().PutStr("tier", segmentTierAttributeValue) + dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentUsedPercent) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSegmentUsedPercent) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentUsedPercent(cfg MetricConfig) metricApachedruidSegmentUsedPercent { + m := metricApachedruidSegmentUsedPercent{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentWaitCompactBytes struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.wait_compact.bytes metric with initial data. +func (m *metricApachedruidSegmentWaitCompactBytes) init() { + m.data.SetName("apachedruid.segment.wait_compact.bytes") + m.data.SetDescription("Total bytes of this datasource waiting to be compacted by the auto compaction (only consider intervals/segments that are eligible for auto compaction).") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSegmentWaitCompactBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentWaitCompactBytes) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricApachedruidSegmentWaitCompactBytes) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentWaitCompactBytes(cfg MetricConfig) metricApachedruidSegmentWaitCompactBytes { + m := metricApachedruidSegmentWaitCompactBytes{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSegmentWaitCompactCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.segment.wait_compact.count metric with initial data. +func (m *metricApachedruidSegmentWaitCompactCount) init() { + m.data.SetName("apachedruid.segment.wait_compact.count") + m.data.SetDescription("Total number of segments of this datasource waiting to be compacted by the auto compaction (only consider intervals/segments that are eligible for auto compaction).") + m.data.SetUnit("{segments}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSegmentWaitCompactCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSegmentWaitCompactCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSegmentWaitCompactCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSegmentWaitCompactCount(cfg MetricConfig) metricApachedruidSegmentWaitCompactCount { + m := metricApachedruidSegmentWaitCompactCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidServerviewInitTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.serverview.init.time metric with initial data. +func (m *metricApachedruidServerviewInitTime) init() { + m.data.SetName("apachedruid.serverview.init.time") + m.data.SetDescription("Time taken to initialize the broker server view. Useful to detect if brokers are taking too long to start.") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidServerviewInitTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricApachedruidServerviewInitTime) updateCapacity() {
+	if m.data.Gauge().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Gauge().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricApachedruidServerviewInitTime) emit(metrics pmetric.MetricSlice) {
+	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricApachedruidServerviewInitTime(cfg MetricConfig) metricApachedruidServerviewInitTime {
+	m := metricApachedruidServerviewInitTime{config: cfg}
+	if cfg.Enabled {
+		m.data = pmetric.NewMetric()
+		m.init()
+	}
+	return m
+}
+
+type metricApachedruidServerviewSyncHealthy struct {
+	data     pmetric.Metric // data buffer for generated metric.
+	config   MetricConfig   // metric config provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills apachedruid.serverview.sync.healthy metric with initial data.
+func (m *metricApachedruidServerviewSyncHealthy) init() {
+	m.data.SetName("apachedruid.serverview.sync.healthy")
+	m.data.SetDescription("Sync status of the Broker with a segment-loading server such as a Historical or Peon. Emitted only when [HTTP-based server view](https://druid.apache.org/docs/latest/configuration#segment-management) is enabled. This metric can be used in conjunction with `serverview/sync/unstableTime` to debug slow startup of Brokers.")
+	m.data.SetUnit("1")
+	m.data.SetEmptyGauge()
+	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricApachedruidServerviewSyncHealthy) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serverviewTierAttributeValue string, serverviewServerAttributeValue string) {
+	if !m.config.Enabled {
+		return
+	}
+	dp := m.data.Gauge().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetIntValue(val)
+	dp.Attributes().PutStr("tier", serverviewTierAttributeValue)
+	dp.Attributes().PutStr("server", serverviewServerAttributeValue)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricApachedruidServerviewSyncHealthy) updateCapacity() {
+	if m.data.Gauge().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Gauge().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricApachedruidServerviewSyncHealthy) emit(metrics pmetric.MetricSlice) {
+	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricApachedruidServerviewSyncHealthy(cfg MetricConfig) metricApachedruidServerviewSyncHealthy {
+	m := metricApachedruidServerviewSyncHealthy{config: cfg}
+	if cfg.Enabled {
+		m.data = pmetric.NewMetric()
+		m.init()
+	}
+	return m
+}
+
+type metricApachedruidServerviewSyncUnstableTime struct {
+	data     pmetric.Metric // data buffer for generated metric.
+	config   MetricConfig   // metric config provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills apachedruid.serverview.sync.unstable_time metric with initial data.
+func (m *metricApachedruidServerviewSyncUnstableTime) init() {
+	m.data.SetName("apachedruid.serverview.sync.unstable_time")
+	m.data.SetDescription("Time in milliseconds for which the Broker has been failing to sync with a segment-loading server. Emitted only when [HTTP-based server view](https://druid.apache.org/docs/latest/configuration#segment-management) is enabled.")
+	m.data.SetUnit("ms")
+	m.data.SetEmptyGauge()
+	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricApachedruidServerviewSyncUnstableTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serverviewTierAttributeValue string, serverviewServerAttributeValue string) {
+	if !m.config.Enabled {
+		return
+	}
+	dp := m.data.Gauge().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetIntValue(val)
+	dp.Attributes().PutStr("tier", serverviewTierAttributeValue)
+	dp.Attributes().PutStr("server", serverviewServerAttributeValue)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricApachedruidServerviewSyncUnstableTime) updateCapacity() {
+	if m.data.Gauge().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Gauge().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricApachedruidServerviewSyncUnstableTime) emit(metrics pmetric.MetricSlice) {
+	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricApachedruidServerviewSyncUnstableTime(cfg MetricConfig) metricApachedruidServerviewSyncUnstableTime {
+	m := metricApachedruidServerviewSyncUnstableTime{config: cfg}
+	if cfg.Enabled {
+		m.data = pmetric.NewMetric()
+		m.init()
+	}
+	return m
+}
+
+type metricApachedruidSQLQueryBytes struct {
+	data     pmetric.Metric // data buffer for generated metric.
+	config   MetricConfig   // metric config provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills apachedruid.sql_query.bytes metric with initial data.
+func (m *metricApachedruidSQLQueryBytes) init() {
+	m.data.SetName("apachedruid.sql_query.bytes")
+	m.data.SetDescription("Number of bytes returned in the SQL query response.")
+	m.data.SetUnit("By")
+	m.data.SetEmptyGauge()
+	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricApachedruidSQLQueryBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sqlQueryDataSourceAttributeValue string, sqlQueryNativeQueryIdsAttributeValue string, sqlQueryEngineAttributeValue string, sqlQueryRemoteAddressAttributeValue string, sqlQueryIDAttributeValue string, sqlQuerySuccessAttributeValue string) {
+	if !m.config.Enabled {
+		return
+	}
+	dp := m.data.Gauge().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetIntValue(val)
+	dp.Attributes().PutStr("data_source", sqlQueryDataSourceAttributeValue)
+	dp.Attributes().PutStr("native_query_ids", sqlQueryNativeQueryIdsAttributeValue)
+	dp.Attributes().PutStr("engine", sqlQueryEngineAttributeValue)
+	dp.Attributes().PutStr("remote_address", sqlQueryRemoteAddressAttributeValue)
+	dp.Attributes().PutStr("id", sqlQueryIDAttributeValue)
+	dp.Attributes().PutStr("success", sqlQuerySuccessAttributeValue)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricApachedruidSQLQueryBytes) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSQLQueryBytes) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSQLQueryBytes(cfg MetricConfig) metricApachedruidSQLQueryBytes { + m := metricApachedruidSQLQueryBytes{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSQLQueryPlanningTimeMs struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sql_query.planning_time_ms metric with initial data. +func (m *metricApachedruidSQLQueryPlanningTimeMs) init() { + m.data.SetName("apachedruid.sql_query.planning_time_ms") + m.data.SetDescription("Milliseconds taken to plan a SQL to native query.") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSQLQueryPlanningTimeMs) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sqlQueryDataSourceAttributeValue string, sqlQueryNativeQueryIdsAttributeValue string, sqlQueryEngineAttributeValue string, sqlQueryRemoteAddressAttributeValue string, sqlQueryIDAttributeValue string, sqlQuerySuccessAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("data_source", sqlQueryDataSourceAttributeValue) + dp.Attributes().PutStr("native_query_ids", sqlQueryNativeQueryIdsAttributeValue) + dp.Attributes().PutStr("engine", sqlQueryEngineAttributeValue) + dp.Attributes().PutStr("remote_address", sqlQueryRemoteAddressAttributeValue) + dp.Attributes().PutStr("id", sqlQueryIDAttributeValue) + dp.Attributes().PutStr("success", sqlQuerySuccessAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSQLQueryPlanningTimeMs) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSQLQueryPlanningTimeMs) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSQLQueryPlanningTimeMs(cfg MetricConfig) metricApachedruidSQLQueryPlanningTimeMs { + m := metricApachedruidSQLQueryPlanningTimeMs{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSQLQueryTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sql_query.time metric with initial data. 
+func (m *metricApachedruidSQLQueryTime) init() { + m.data.SetName("apachedruid.sql_query.time") + m.data.SetDescription("Milliseconds taken to complete a SQL query.") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSQLQueryTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sqlQueryDataSourceAttributeValue string, sqlQueryNativeQueryIdsAttributeValue string, sqlQueryEngineAttributeValue string, sqlQueryRemoteAddressAttributeValue string, sqlQueryIDAttributeValue string, sqlQuerySuccessAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("data_source", sqlQueryDataSourceAttributeValue) + dp.Attributes().PutStr("native_query_ids", sqlQueryNativeQueryIdsAttributeValue) + dp.Attributes().PutStr("engine", sqlQueryEngineAttributeValue) + dp.Attributes().PutStr("remote_address", sqlQueryRemoteAddressAttributeValue) + dp.Attributes().PutStr("id", sqlQueryIDAttributeValue) + dp.Attributes().PutStr("success", sqlQuerySuccessAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSQLQueryTime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSQLQueryTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSQLQueryTime(cfg MetricConfig) metricApachedruidSQLQueryTime { + m := metricApachedruidSQLQueryTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSubqueryByteLimitCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.subquery.byte_limit.count metric with initial data. +func (m *metricApachedruidSubqueryByteLimitCount) init() { + m.data.SetName("apachedruid.subquery.byte_limit.count") + m.data.SetDescription("Number of subqueries whose results are materialized as frames (Druid's internal byte representation of rows).") + m.data.SetUnit("{subqueries}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) +} + +func (m *metricApachedruidSubqueryByteLimitCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSubqueryByteLimitCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricApachedruidSubqueryByteLimitCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSubqueryByteLimitCount(cfg MetricConfig) metricApachedruidSubqueryByteLimitCount { + m := metricApachedruidSubqueryByteLimitCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSubqueryFallbackCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.subquery.fallback.count metric with initial data. +func (m *metricApachedruidSubqueryFallbackCount) init() { + m.data.SetName("apachedruid.subquery.fallback.count") + m.data.SetDescription("Number of subqueries which cannot be materialized as frames.") + m.data.SetUnit("{subqueries}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) +} + +func (m *metricApachedruidSubqueryFallbackCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSubqueryFallbackCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSubqueryFallbackCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSubqueryFallbackCount(cfg MetricConfig) metricApachedruidSubqueryFallbackCount { + m := metricApachedruidSubqueryFallbackCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSubqueryFallbackInsufficientTypeCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.subquery.fallback.insufficient_type.count metric with initial data. 
+func (m *metricApachedruidSubqueryFallbackInsufficientTypeCount) init() {
+	m.data.SetName("apachedruid.subquery.fallback.insufficient_type.count")
+	m.data.SetDescription("Number of subqueries which cannot be materialized as frames due to insufficient type information in the row signature.")
+	m.data.SetUnit("{subqueries}")
+	m.data.SetEmptySum()
+	m.data.Sum().SetIsMonotonic(true)
+	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta)
+}
+
+func (m *metricApachedruidSubqueryFallbackInsufficientTypeCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+	if !m.config.Enabled {
+		return
+	}
+	dp := m.data.Sum().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetIntValue(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricApachedruidSubqueryFallbackInsufficientTypeCount) updateCapacity() {
+	if m.data.Sum().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Sum().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricApachedruidSubqueryFallbackInsufficientTypeCount) emit(metrics pmetric.MetricSlice) {
+	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricApachedruidSubqueryFallbackInsufficientTypeCount(cfg MetricConfig) metricApachedruidSubqueryFallbackInsufficientTypeCount {
+	m := metricApachedruidSubqueryFallbackInsufficientTypeCount{config: cfg}
+	if cfg.Enabled {
+		m.data = pmetric.NewMetric()
+		m.init()
+	}
+	return m
+}
+
+type metricApachedruidSubqueryFallbackUnknownReasonCount struct {
+	data     pmetric.Metric // data buffer for generated metric.
+	config   MetricConfig   // metric config provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills apachedruid.subquery.fallback.unknown_reason.count metric with initial data.
+func (m *metricApachedruidSubqueryFallbackUnknownReasonCount) init() {
+	m.data.SetName("apachedruid.subquery.fallback.unknown_reason.count")
+	m.data.SetDescription("Number of subqueries which cannot be materialized as frames due to other reasons.")
+	m.data.SetUnit("{subqueries}")
+	m.data.SetEmptySum()
+	m.data.Sum().SetIsMonotonic(true)
+	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta)
+}
+
+func (m *metricApachedruidSubqueryFallbackUnknownReasonCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+	if !m.config.Enabled {
+		return
+	}
+	dp := m.data.Sum().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetIntValue(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricApachedruidSubqueryFallbackUnknownReasonCount) updateCapacity() {
+	if m.data.Sum().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Sum().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricApachedruidSubqueryFallbackUnknownReasonCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSubqueryFallbackUnknownReasonCount(cfg MetricConfig) metricApachedruidSubqueryFallbackUnknownReasonCount { + m := metricApachedruidSubqueryFallbackUnknownReasonCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSubqueryRowLimitCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.subquery.row_limit.count metric with initial data. +func (m *metricApachedruidSubqueryRowLimitCount) init() { + m.data.SetName("apachedruid.subquery.row_limit.count") + m.data.SetDescription("Number of subqueries whose results are materialized as rows (Java objects on heap).") + m.data.SetUnit("{subqueries}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) +} + +func (m *metricApachedruidSubqueryRowLimitCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSubqueryRowLimitCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSubqueryRowLimitCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSubqueryRowLimitCount(cfg MetricConfig) metricApachedruidSubqueryRowLimitCount { + m := metricApachedruidSubqueryRowLimitCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysCPU struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.cpu metric with initial data. +func (m *metricApachedruidSysCPU) init() { + m.data.SetName("apachedruid.sys.cpu") + m.data.SetDescription("CPU used.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSysCPU) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysCPUTimeAttributeValue string, sysCPUNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("cpu_time", sysCPUTimeAttributeValue) + dp.Attributes().PutStr("cpu_name", sysCPUNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricApachedruidSysCPU) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSysCPU) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysCPU(cfg MetricConfig) metricApachedruidSysCPU { + m := metricApachedruidSysCPU{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysDiskQueue struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.disk.queue metric with initial data. +func (m *metricApachedruidSysDiskQueue) init() { + m.data.SetName("apachedruid.sys.disk.queue") + m.data.SetDescription("Disk queue length. Measures number of requests waiting to be processed by disk.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSysDiskQueue) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysDiskNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("disk_name", sysDiskNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysDiskQueue) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSysDiskQueue) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysDiskQueue(cfg MetricConfig) metricApachedruidSysDiskQueue { + m := metricApachedruidSysDiskQueue{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysDiskReadCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.disk.read.count metric with initial data. 
+func (m *metricApachedruidSysDiskReadCount) init() { + m.data.SetName("apachedruid.sys.disk.read.count") + m.data.SetDescription("Reads from disk.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSysDiskReadCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysDiskNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("disk_name", sysDiskNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysDiskReadCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSysDiskReadCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysDiskReadCount(cfg MetricConfig) metricApachedruidSysDiskReadCount { + m := metricApachedruidSysDiskReadCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysDiskReadSize struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.disk.read.size metric with initial data. +func (m *metricApachedruidSysDiskReadSize) init() { + m.data.SetName("apachedruid.sys.disk.read.size") + m.data.SetDescription("Bytes read from disk. One indicator of the amount of paging occurring for segments.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSysDiskReadSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysDiskNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("disk_name", sysDiskNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysDiskReadSize) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSysDiskReadSize) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysDiskReadSize(cfg MetricConfig) metricApachedruidSysDiskReadSize { + m := metricApachedruidSysDiskReadSize{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysDiskTransferTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. 
+ capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.disk.transfer_time metric with initial data. +func (m *metricApachedruidSysDiskTransferTime) init() { + m.data.SetName("apachedruid.sys.disk.transfer_time") + m.data.SetDescription("Transfer time to read from or write to disk.") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSysDiskTransferTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysDiskNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("disk_name", sysDiskNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysDiskTransferTime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSysDiskTransferTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysDiskTransferTime(cfg MetricConfig) metricApachedruidSysDiskTransferTime { + m := metricApachedruidSysDiskTransferTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysDiskWriteCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.disk.write.count metric with initial data. +func (m *metricApachedruidSysDiskWriteCount) init() { + m.data.SetName("apachedruid.sys.disk.write.count") + m.data.SetDescription("Writes to disk.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSysDiskWriteCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysDiskNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("disk_name", sysDiskNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysDiskWriteCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricApachedruidSysDiskWriteCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysDiskWriteCount(cfg MetricConfig) metricApachedruidSysDiskWriteCount { + m := metricApachedruidSysDiskWriteCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysDiskWriteSize struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.disk.write.size metric with initial data. +func (m *metricApachedruidSysDiskWriteSize) init() { + m.data.SetName("apachedruid.sys.disk.write.size") + m.data.SetDescription("Bytes written to disk. One indicator of the amount of paging occurring for segments.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSysDiskWriteSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysDiskNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("disk_name", sysDiskNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysDiskWriteSize) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSysDiskWriteSize) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysDiskWriteSize(cfg MetricConfig) metricApachedruidSysDiskWriteSize { + m := metricApachedruidSysDiskWriteSize{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysFsFilesCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.fs.files.count metric with initial data. +func (m *metricApachedruidSysFsFilesCount) init() { + m.data.SetName("apachedruid.sys.fs.files.count") + m.data.SetDescription("Filesystem total IO nodes.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSysFsFilesCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysFsDirNameAttributeValue string, sysFsDevNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("fs_dir_name", sysFsDirNameAttributeValue) + dp.Attributes().PutStr("fs_dev_name", sysFsDevNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricApachedruidSysFsFilesCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSysFsFilesCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysFsFilesCount(cfg MetricConfig) metricApachedruidSysFsFilesCount { + m := metricApachedruidSysFsFilesCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysFsFilesFree struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.fs.files.free metric with initial data. +func (m *metricApachedruidSysFsFilesFree) init() { + m.data.SetName("apachedruid.sys.fs.files.free") + m.data.SetDescription("Filesystem free IO nodes.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSysFsFilesFree) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysFsDirNameAttributeValue string, sysFsDevNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("fs_dir_name", sysFsDirNameAttributeValue) + dp.Attributes().PutStr("fs_dev_name", sysFsDevNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysFsFilesFree) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSysFsFilesFree) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysFsFilesFree(cfg MetricConfig) metricApachedruidSysFsFilesFree { + m := metricApachedruidSysFsFilesFree{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysFsMax struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.fs.max metric with initial data. 
+func (m *metricApachedruidSysFsMax) init() { + m.data.SetName("apachedruid.sys.fs.max") + m.data.SetDescription("Filesystem bytes max.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSysFsMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysFsDirNameAttributeValue string, sysFsDevNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("fs_dir_name", sysFsDirNameAttributeValue) + dp.Attributes().PutStr("fs_dev_name", sysFsDevNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysFsMax) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSysFsMax) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysFsMax(cfg MetricConfig) metricApachedruidSysFsMax { + m := metricApachedruidSysFsMax{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysFsUsed struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.fs.used metric with initial data. +func (m *metricApachedruidSysFsUsed) init() { + m.data.SetName("apachedruid.sys.fs.used") + m.data.SetDescription("Filesystem bytes used.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSysFsUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysFsDirNameAttributeValue string, sysFsDevNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("fs_dir_name", sysFsDirNameAttributeValue) + dp.Attributes().PutStr("fs_dev_name", sysFsDevNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysFsUsed) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSysFsUsed) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysFsUsed(cfg MetricConfig) metricApachedruidSysFsUsed { + m := metricApachedruidSysFsUsed{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysLa1 struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. 
+ capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.la.1 metric with initial data. +func (m *metricApachedruidSysLa1) init() { + m.data.SetName("apachedruid.sys.la.1") + m.data.SetDescription("System CPU load averages over past `i` minutes, where `i={1,5,15}`.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidSysLa1) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysLa1) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSysLa1) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysLa1(cfg MetricConfig) metricApachedruidSysLa1 { + m := metricApachedruidSysLa1{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysLa15 struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.la.15 metric with initial data. +func (m *metricApachedruidSysLa15) init() { + m.data.SetName("apachedruid.sys.la.15") + m.data.SetDescription("System CPU load averages over past `i` minutes, where `i={1,5,15}`.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidSysLa15) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysLa15) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSysLa15) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysLa15(cfg MetricConfig) metricApachedruidSysLa15 { + m := metricApachedruidSysLa15{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysLa5 struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.la.5 metric with initial data. 
+func (m *metricApachedruidSysLa5) init() { + m.data.SetName("apachedruid.sys.la.5") + m.data.SetDescription("System CPU load averages over past `i` minutes, where `i={1,5,15}`.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidSysLa5) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysLa5) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSysLa5) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysLa5(cfg MetricConfig) metricApachedruidSysLa5 { + m := metricApachedruidSysLa5{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysMemFree struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.mem.free metric with initial data. +func (m *metricApachedruidSysMemFree) init() { + m.data.SetName("apachedruid.sys.mem.free") + m.data.SetDescription("Memory free.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidSysMemFree) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysMemFree) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSysMemFree) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysMemFree(cfg MetricConfig) metricApachedruidSysMemFree { + m := metricApachedruidSysMemFree{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysMemMax struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.mem.max metric with initial data. 
+func (m *metricApachedruidSysMemMax) init() { + m.data.SetName("apachedruid.sys.mem.max") + m.data.SetDescription("Memory max.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidSysMemMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysMemMax) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSysMemMax) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysMemMax(cfg MetricConfig) metricApachedruidSysMemMax { + m := metricApachedruidSysMemMax{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysMemUsed struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.mem.used metric with initial data. +func (m *metricApachedruidSysMemUsed) init() { + m.data.SetName("apachedruid.sys.mem.used") + m.data.SetDescription("Memory used.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidSysMemUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysMemUsed) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSysMemUsed) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysMemUsed(cfg MetricConfig) metricApachedruidSysMemUsed { + m := metricApachedruidSysMemUsed{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysNetReadDropped struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.net.read.dropped metric with initial data. 
+func (m *metricApachedruidSysNetReadDropped) init() { + m.data.SetName("apachedruid.sys.net.read.dropped") + m.data.SetDescription("Total packets dropped coming from network.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSysNetReadDropped) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysNetHwaddrAttributeValue string, sysNetNameAttributeValue string, sysNetAddressAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("net_hwaddr", sysNetHwaddrAttributeValue) + dp.Attributes().PutStr("net_name", sysNetNameAttributeValue) + dp.Attributes().PutStr("net_address", sysNetAddressAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysNetReadDropped) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSysNetReadDropped) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysNetReadDropped(cfg MetricConfig) metricApachedruidSysNetReadDropped { + m := metricApachedruidSysNetReadDropped{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysNetReadErrors struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.net.read.errors metric with initial data. +func (m *metricApachedruidSysNetReadErrors) init() { + m.data.SetName("apachedruid.sys.net.read.errors") + m.data.SetDescription("Total network read errors.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSysNetReadErrors) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysNetHwaddrAttributeValue string, sysNetNameAttributeValue string, sysNetAddressAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("net_hwaddr", sysNetHwaddrAttributeValue) + dp.Attributes().PutStr("net_name", sysNetNameAttributeValue) + dp.Attributes().PutStr("net_address", sysNetAddressAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysNetReadErrors) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricApachedruidSysNetReadErrors) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysNetReadErrors(cfg MetricConfig) metricApachedruidSysNetReadErrors { + m := metricApachedruidSysNetReadErrors{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysNetReadPackets struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.net.read.packets metric with initial data. +func (m *metricApachedruidSysNetReadPackets) init() { + m.data.SetName("apachedruid.sys.net.read.packets") + m.data.SetDescription("Total packets read from the network.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSysNetReadPackets) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysNetHwaddrAttributeValue string, sysNetNameAttributeValue string, sysNetAddressAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("net_hwaddr", sysNetHwaddrAttributeValue) + dp.Attributes().PutStr("net_name", sysNetNameAttributeValue) + dp.Attributes().PutStr("net_address", sysNetAddressAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysNetReadPackets) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSysNetReadPackets) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysNetReadPackets(cfg MetricConfig) metricApachedruidSysNetReadPackets { + m := metricApachedruidSysNetReadPackets{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysNetReadSize struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.net.read.size metric with initial data. 
+func (m *metricApachedruidSysNetReadSize) init() { + m.data.SetName("apachedruid.sys.net.read.size") + m.data.SetDescription("Bytes read from the network.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSysNetReadSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysNetHwaddrAttributeValue string, sysNetNameAttributeValue string, sysNetAddressAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("net_hwaddr", sysNetHwaddrAttributeValue) + dp.Attributes().PutStr("net_name", sysNetNameAttributeValue) + dp.Attributes().PutStr("net_address", sysNetAddressAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysNetReadSize) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSysNetReadSize) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysNetReadSize(cfg MetricConfig) metricApachedruidSysNetReadSize { + m := metricApachedruidSysNetReadSize{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysNetWriteCollisions struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.net.write.collisions metric with initial data. +func (m *metricApachedruidSysNetWriteCollisions) init() { + m.data.SetName("apachedruid.sys.net.write.collisions") + m.data.SetDescription("Total network write collisions.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSysNetWriteCollisions) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysNetHwaddrAttributeValue string, sysNetNameAttributeValue string, sysNetAddressAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("net_hwaddr", sysNetHwaddrAttributeValue) + dp.Attributes().PutStr("net_name", sysNetNameAttributeValue) + dp.Attributes().PutStr("net_address", sysNetAddressAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysNetWriteCollisions) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricApachedruidSysNetWriteCollisions) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysNetWriteCollisions(cfg MetricConfig) metricApachedruidSysNetWriteCollisions { + m := metricApachedruidSysNetWriteCollisions{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysNetWriteErrors struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.net.write.errors metric with initial data. +func (m *metricApachedruidSysNetWriteErrors) init() { + m.data.SetName("apachedruid.sys.net.write.errors") + m.data.SetDescription("Total network write errors.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSysNetWriteErrors) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysNetHwaddrAttributeValue string, sysNetNameAttributeValue string, sysNetAddressAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("net_hwaddr", sysNetHwaddrAttributeValue) + dp.Attributes().PutStr("net_name", sysNetNameAttributeValue) + dp.Attributes().PutStr("net_address", sysNetAddressAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysNetWriteErrors) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSysNetWriteErrors) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysNetWriteErrors(cfg MetricConfig) metricApachedruidSysNetWriteErrors { + m := metricApachedruidSysNetWriteErrors{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysNetWritePackets struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.net.write.packets metric with initial data. 
+func (m *metricApachedruidSysNetWritePackets) init() { + m.data.SetName("apachedruid.sys.net.write.packets") + m.data.SetDescription("Total packets written to the network.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSysNetWritePackets) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysNetHwaddrAttributeValue string, sysNetNameAttributeValue string, sysNetAddressAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("net_hwaddr", sysNetHwaddrAttributeValue) + dp.Attributes().PutStr("net_name", sysNetNameAttributeValue) + dp.Attributes().PutStr("net_address", sysNetAddressAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysNetWritePackets) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSysNetWritePackets) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysNetWritePackets(cfg MetricConfig) metricApachedruidSysNetWritePackets { + m := metricApachedruidSysNetWritePackets{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysNetWriteSize struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.net.write.size metric with initial data. +func (m *metricApachedruidSysNetWriteSize) init() { + m.data.SetName("apachedruid.sys.net.write.size") + m.data.SetDescription("Bytes written to the network.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSysNetWriteSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysNetHwaddrAttributeValue string, sysNetNameAttributeValue string, sysNetAddressAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("net_hwaddr", sysNetHwaddrAttributeValue) + dp.Attributes().PutStr("net_name", sysNetNameAttributeValue) + dp.Attributes().PutStr("net_address", sysNetAddressAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysNetWriteSize) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricApachedruidSysNetWriteSize) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysNetWriteSize(cfg MetricConfig) metricApachedruidSysNetWriteSize { + m := metricApachedruidSysNetWriteSize{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysStorageUsed struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.storage.used metric with initial data. +func (m *metricApachedruidSysStorageUsed) init() { + m.data.SetName("apachedruid.sys.storage.used") + m.data.SetDescription("Disk space used.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidSysStorageUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysFsDirNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("fs_dir_name", sysFsDirNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysStorageUsed) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSysStorageUsed) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysStorageUsed(cfg MetricConfig) metricApachedruidSysStorageUsed { + m := metricApachedruidSysStorageUsed{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysSwapFree struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.swap.free metric with initial data. +func (m *metricApachedruidSysSwapFree) init() { + m.data.SetName("apachedruid.sys.swap.free") + m.data.SetDescription("Free swap.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidSysSwapFree) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysSwapFree) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricApachedruidSysSwapFree) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysSwapFree(cfg MetricConfig) metricApachedruidSysSwapFree { + m := metricApachedruidSysSwapFree{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysSwapMax struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.swap.max metric with initial data. +func (m *metricApachedruidSysSwapMax) init() { + m.data.SetName("apachedruid.sys.swap.max") + m.data.SetDescription("Max swap.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidSysSwapMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysSwapMax) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSysSwapMax) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysSwapMax(cfg MetricConfig) metricApachedruidSysSwapMax { + m := metricApachedruidSysSwapMax{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysSwapPageIn struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.swap.page_in metric with initial data. +func (m *metricApachedruidSysSwapPageIn) init() { + m.data.SetName("apachedruid.sys.swap.page_in") + m.data.SetDescription("Paged in swap.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidSysSwapPageIn) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysSwapPageIn) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricApachedruidSysSwapPageIn) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysSwapPageIn(cfg MetricConfig) metricApachedruidSysSwapPageIn { + m := metricApachedruidSysSwapPageIn{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysSwapPageOut struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.swap.page_out metric with initial data. +func (m *metricApachedruidSysSwapPageOut) init() { + m.data.SetName("apachedruid.sys.swap.page_out") + m.data.SetDescription("Paged out swap.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidSysSwapPageOut) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysSwapPageOut) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSysSwapPageOut) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysSwapPageOut(cfg MetricConfig) metricApachedruidSysSwapPageOut { + m := metricApachedruidSysSwapPageOut{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysTcpv4ActiveOpens struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.tcpv4.active_opens metric with initial data. +func (m *metricApachedruidSysTcpv4ActiveOpens) init() { + m.data.SetName("apachedruid.sys.tcpv4.active_opens") + m.data.SetDescription("Total TCP active open connections.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidSysTcpv4ActiveOpens) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysTcpv4ActiveOpens) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricApachedruidSysTcpv4ActiveOpens) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysTcpv4ActiveOpens(cfg MetricConfig) metricApachedruidSysTcpv4ActiveOpens { + m := metricApachedruidSysTcpv4ActiveOpens{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysTcpv4AttemptFails struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.tcpv4.attempt_fails metric with initial data. +func (m *metricApachedruidSysTcpv4AttemptFails) init() { + m.data.SetName("apachedruid.sys.tcpv4.attempt_fails") + m.data.SetDescription("Total TCP active connection failures.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidSysTcpv4AttemptFails) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysTcpv4AttemptFails) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSysTcpv4AttemptFails) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysTcpv4AttemptFails(cfg MetricConfig) metricApachedruidSysTcpv4AttemptFails { + m := metricApachedruidSysTcpv4AttemptFails{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysTcpv4EstabResets struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.tcpv4.estab_resets metric with initial data. +func (m *metricApachedruidSysTcpv4EstabResets) init() { + m.data.SetName("apachedruid.sys.tcpv4.estab_resets") + m.data.SetDescription("Total TCP connection resets.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidSysTcpv4EstabResets) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysTcpv4EstabResets) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricApachedruidSysTcpv4EstabResets) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysTcpv4EstabResets(cfg MetricConfig) metricApachedruidSysTcpv4EstabResets { + m := metricApachedruidSysTcpv4EstabResets{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysTcpv4InErrs struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.tcpv4.in.errs metric with initial data. +func (m *metricApachedruidSysTcpv4InErrs) init() { + m.data.SetName("apachedruid.sys.tcpv4.in.errs") + m.data.SetDescription("Errors while reading segments.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidSysTcpv4InErrs) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysTcpv4InErrs) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSysTcpv4InErrs) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysTcpv4InErrs(cfg MetricConfig) metricApachedruidSysTcpv4InErrs { + m := metricApachedruidSysTcpv4InErrs{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysTcpv4InSegs struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.tcpv4.in.segs metric with initial data. +func (m *metricApachedruidSysTcpv4InSegs) init() { + m.data.SetName("apachedruid.sys.tcpv4.in.segs") + m.data.SetDescription("Total segments received in connection.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidSysTcpv4InSegs) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysTcpv4InSegs) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricApachedruidSysTcpv4InSegs) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysTcpv4InSegs(cfg MetricConfig) metricApachedruidSysTcpv4InSegs { + m := metricApachedruidSysTcpv4InSegs{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysTcpv4OutRsts struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.tcpv4.out.rsts metric with initial data. +func (m *metricApachedruidSysTcpv4OutRsts) init() { + m.data.SetName("apachedruid.sys.tcpv4.out.rsts") + m.data.SetDescription("Total `out reset` packets sent to reset the connection.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidSysTcpv4OutRsts) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysTcpv4OutRsts) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSysTcpv4OutRsts) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysTcpv4OutRsts(cfg MetricConfig) metricApachedruidSysTcpv4OutRsts { + m := metricApachedruidSysTcpv4OutRsts{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysTcpv4OutSegs struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.tcpv4.out.segs metric with initial data. +func (m *metricApachedruidSysTcpv4OutSegs) init() { + m.data.SetName("apachedruid.sys.tcpv4.out.segs") + m.data.SetDescription("Total segments sent.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidSysTcpv4OutSegs) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysTcpv4OutSegs) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricApachedruidSysTcpv4OutSegs) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysTcpv4OutSegs(cfg MetricConfig) metricApachedruidSysTcpv4OutSegs { + m := metricApachedruidSysTcpv4OutSegs{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysTcpv4PassiveOpens struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.tcpv4.passive_opens metric with initial data. +func (m *metricApachedruidSysTcpv4PassiveOpens) init() { + m.data.SetName("apachedruid.sys.tcpv4.passive_opens") + m.data.SetDescription("Total TCP passive open connections.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidSysTcpv4PassiveOpens) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysTcpv4PassiveOpens) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSysTcpv4PassiveOpens) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysTcpv4PassiveOpens(cfg MetricConfig) metricApachedruidSysTcpv4PassiveOpens { + m := metricApachedruidSysTcpv4PassiveOpens{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysTcpv4RetransSegs struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.tcpv4.retrans.segs metric with initial data. +func (m *metricApachedruidSysTcpv4RetransSegs) init() { + m.data.SetName("apachedruid.sys.tcpv4.retrans.segs") + m.data.SetDescription("Total segments re-transmitted.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidSysTcpv4RetransSegs) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysTcpv4RetransSegs) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricApachedruidSysTcpv4RetransSegs) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysTcpv4RetransSegs(cfg MetricConfig) metricApachedruidSysTcpv4RetransSegs { + m := metricApachedruidSysTcpv4RetransSegs{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidSysUptime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.sys.uptime metric with initial data. +func (m *metricApachedruidSysUptime) init() { + m.data.SetName("apachedruid.sys.uptime") + m.data.SetDescription("Total system uptime.") + m.data.SetUnit("s") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidSysUptime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidSysUptime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidSysUptime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidSysUptime(cfg MetricConfig) metricApachedruidSysUptime { + m := metricApachedruidSysUptime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidTaskActionBatchAttempts struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.task.action.batch.attempts metric with initial data. +func (m *metricApachedruidTaskActionBatchAttempts) init() { + m.data.SetName("apachedruid.task.action.batch.attempts") + m.data.SetDescription("Number of execution attempts for a single batch of task actions. 
Currently only being emitted for [batched `segmentAllocate` actions](https://druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions).") + m.data.SetUnit("{attempts}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidTaskActionBatchAttempts) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskIntervalAttributeValue string, taskDataSourceAttributeValue string, taskActionTypeAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("interval", taskIntervalAttributeValue) + dp.Attributes().PutStr("data_source", taskDataSourceAttributeValue) + dp.Attributes().PutStr("task_action_type", taskActionTypeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidTaskActionBatchAttempts) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidTaskActionBatchAttempts) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidTaskActionBatchAttempts(cfg MetricConfig) metricApachedruidTaskActionBatchAttempts { + m := metricApachedruidTaskActionBatchAttempts{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidTaskActionBatchQueueTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.task.action.batch.queue_time metric with initial data. +func (m *metricApachedruidTaskActionBatchQueueTime) init() { + m.data.SetName("apachedruid.task.action.batch.queue_time") + m.data.SetDescription("Milliseconds spent by a batch of task actions in queue. Currently only being emitted for [batched `segmentAllocate` actions](https://druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions).") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidTaskActionBatchQueueTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskIntervalAttributeValue string, taskDataSourceAttributeValue string, taskActionTypeAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("interval", taskIntervalAttributeValue) + dp.Attributes().PutStr("data_source", taskDataSourceAttributeValue) + dp.Attributes().PutStr("task_action_type", taskActionTypeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricApachedruidTaskActionBatchQueueTime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidTaskActionBatchQueueTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidTaskActionBatchQueueTime(cfg MetricConfig) metricApachedruidTaskActionBatchQueueTime { + m := metricApachedruidTaskActionBatchQueueTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidTaskActionBatchRunTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.task.action.batch.run_time metric with initial data. +func (m *metricApachedruidTaskActionBatchRunTime) init() { + m.data.SetName("apachedruid.task.action.batch.run_time") + m.data.SetDescription("Milliseconds taken to execute a batch of task actions. Currently only being emitted for [batched `segmentAllocate` actions](https://druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions).") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidTaskActionBatchRunTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskIntervalAttributeValue string, taskDataSourceAttributeValue string, taskActionTypeAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("interval", taskIntervalAttributeValue) + dp.Attributes().PutStr("data_source", taskDataSourceAttributeValue) + dp.Attributes().PutStr("task_action_type", taskActionTypeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidTaskActionBatchRunTime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidTaskActionBatchRunTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidTaskActionBatchRunTime(cfg MetricConfig) metricApachedruidTaskActionBatchRunTime { + m := metricApachedruidTaskActionBatchRunTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidTaskActionBatchSize struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.task.action.batch.size metric with initial data.
+func (m *metricApachedruidTaskActionBatchSize) init() { + m.data.SetName("apachedruid.task.action.batch.size") + m.data.SetDescription("Number of task actions in a batch that was executed during the emission period. Currently only being emitted for [batched `segmentAllocate` actions](https://druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions).") + m.data.SetUnit("{actions}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidTaskActionBatchSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskIntervalAttributeValue string, taskDataSourceAttributeValue string, taskActionTypeAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("interval", taskIntervalAttributeValue) + dp.Attributes().PutStr("data_source", taskDataSourceAttributeValue) + dp.Attributes().PutStr("task_action_type", taskActionTypeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidTaskActionBatchSize) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidTaskActionBatchSize) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidTaskActionBatchSize(cfg MetricConfig) metricApachedruidTaskActionBatchSize { + m := metricApachedruidTaskActionBatchSize{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidTaskActionFailedCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.task.action.failed.count metric with initial data. +func (m *metricApachedruidTaskActionFailedCount) init() { + m.data.SetName("apachedruid.task.action.failed.count") + m.data.SetDescription("Number of task actions that failed during the emission period.
Currently only being emitted for [batched `segmentAllocate` actions](https://druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions).") + m.data.SetUnit("{actions}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidTaskActionFailedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskTypeAttributeValue string, taskDataSourceAttributeValue string, taskActionTypeAttributeValue string, taskGroupIDAttributeValue string, taskTagsAttributeValue string, taskIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", taskTypeAttributeValue) + dp.Attributes().PutStr("data_source", taskDataSourceAttributeValue) + dp.Attributes().PutStr("task_action_type", taskActionTypeAttributeValue) + dp.Attributes().PutStr("group_id", taskGroupIDAttributeValue) + dp.Attributes().PutStr("tags", taskTagsAttributeValue) + dp.Attributes().PutStr("task_id", taskIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidTaskActionFailedCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidTaskActionFailedCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidTaskActionFailedCount(cfg MetricConfig) metricApachedruidTaskActionFailedCount { + m := metricApachedruidTaskActionFailedCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidTaskActionLogTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.task.action.log.time metric with initial data.
+func (m *metricApachedruidTaskActionLogTime) init() { + m.data.SetName("apachedruid.task.action.log.time") + m.data.SetDescription("Milliseconds taken to log a task action to the audit log.") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidTaskActionLogTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskTypeAttributeValue string, taskDataSourceAttributeValue string, taskActionTypeAttributeValue string, taskGroupIDAttributeValue string, taskTagsAttributeValue string, taskIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", taskTypeAttributeValue) + dp.Attributes().PutStr("data_source", taskDataSourceAttributeValue) + dp.Attributes().PutStr("task_action_type", taskActionTypeAttributeValue) + dp.Attributes().PutStr("group_id", taskGroupIDAttributeValue) + dp.Attributes().PutStr("tags", taskTagsAttributeValue) + dp.Attributes().PutStr("task_id", taskIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidTaskActionLogTime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidTaskActionLogTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidTaskActionLogTime(cfg MetricConfig) metricApachedruidTaskActionLogTime { + m := metricApachedruidTaskActionLogTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidTaskActionRunTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.task.action.run.time metric with initial data. 
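For gauges with data-point attributes, recordDataPoint takes one string per attribute, in the order the generated signature declares them. A hedged sketch of recording apachedruid.task.action.log.time (illustrative only; the attribute values are made-up placeholders, not from this patch):

func exampleRecordTaskActionLogTime(now pcommon.Timestamp, millis int64, out pmetric.MetricSlice) {
	m := newMetricApachedruidTaskActionLogTime(MetricConfig{Enabled: true})
	// Attribute order: task_type, data_source, task_action_type, group_id, tags, task_id.
	m.recordDataPoint(now, now, millis, "index_parallel", "wikipedia", "segmentAllocate", "group-1", "", "task-1")
	m.emit(out)
}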
+func (m *metricApachedruidTaskActionRunTime) init() { + m.data.SetName("apachedruid.task.action.run.time") + m.data.SetDescription("Milliseconds taken to execute a task action.") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidTaskActionRunTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskTypeAttributeValue string, taskDataSourceAttributeValue string, taskActionTypeAttributeValue string, taskGroupIDAttributeValue string, taskTagsAttributeValue string, taskIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", taskTypeAttributeValue) + dp.Attributes().PutStr("data_source", taskDataSourceAttributeValue) + dp.Attributes().PutStr("task_action_type", taskActionTypeAttributeValue) + dp.Attributes().PutStr("group_id", taskGroupIDAttributeValue) + dp.Attributes().PutStr("tags", taskTagsAttributeValue) + dp.Attributes().PutStr("task_id", taskIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidTaskActionRunTime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidTaskActionRunTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidTaskActionRunTime(cfg MetricConfig) metricApachedruidTaskActionRunTime { + m := metricApachedruidTaskActionRunTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidTaskActionSuccessCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.task.action.success.count metric with initial data. +func (m *metricApachedruidTaskActionSuccessCount) init() { + m.data.SetName("apachedruid.task.action.success.count") + m.data.SetDescription("Number of task actions that were executed successfully during the emission period. 
Currently only being emitted for [batched `segmentAllocate` actions](https://druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions).") + m.data.SetUnit("{actions}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidTaskActionSuccessCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskTypeAttributeValue string, taskDataSourceAttributeValue string, taskActionTypeAttributeValue string, taskGroupIDAttributeValue string, taskTagsAttributeValue string, taskIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", taskTypeAttributeValue) + dp.Attributes().PutStr("data_source", taskDataSourceAttributeValue) + dp.Attributes().PutStr("task_action_type", taskActionTypeAttributeValue) + dp.Attributes().PutStr("group_id", taskGroupIDAttributeValue) + dp.Attributes().PutStr("tags", taskTagsAttributeValue) + dp.Attributes().PutStr("task_id", taskIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidTaskActionSuccessCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidTaskActionSuccessCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidTaskActionSuccessCount(cfg MetricConfig) metricApachedruidTaskActionSuccessCount { + m := metricApachedruidTaskActionSuccessCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidTaskFailedCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.task.failed.count metric with initial data. +func (m *metricApachedruidTaskFailedCount) init() { + m.data.SetName("apachedruid.task.failed.count") + m.data.SetDescription("Number of failed tasks per emission period. This metric is only available if the `TaskCountStatsMonitor` module is included.") + m.data.SetUnit("{tasks}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidTaskFailedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("data_source", taskDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricApachedruidTaskFailedCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidTaskFailedCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidTaskFailedCount(cfg MetricConfig) metricApachedruidTaskFailedCount { + m := metricApachedruidTaskFailedCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidTaskPendingCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.task.pending.count metric with initial data. +func (m *metricApachedruidTaskPendingCount) init() { + m.data.SetName("apachedruid.task.pending.count") + m.data.SetDescription("Number of current pending tasks. This metric is only available if the `TaskCountStatsMonitor` module is included.") + m.data.SetUnit("{tasks}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidTaskPendingCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("data_source", taskDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidTaskPendingCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidTaskPendingCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidTaskPendingCount(cfg MetricConfig) metricApachedruidTaskPendingCount { + m := metricApachedruidTaskPendingCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidTaskPendingTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.task.pending.time metric with initial data. 
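Unlike the gauges above, the task `*.count` metrics are emitted as monotonic delta sums, so each recorded value is the count observed during one emission period for a given data source. A sketch under that assumption (the function name, timestamps, and attribute value are placeholders):

func exampleRecordTaskFailedCount(start, now pcommon.Timestamp, failed int64, out pmetric.MetricSlice) {
	m := newMetricApachedruidTaskFailedCount(MetricConfig{Enabled: true})
	m.recordDataPoint(start, now, failed, "wikipedia") // data_source attribute
	m.emit(out)                                        // emitted as a delta, monotonic sum
}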
+func (m *metricApachedruidTaskPendingTime) init() { + m.data.SetName("apachedruid.task.pending.time") + m.data.SetDescription("Milliseconds taken for a task to wait for running.") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidTaskPendingTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskTypeAttributeValue string, taskDataSourceAttributeValue string, taskGroupIDAttributeValue string, taskTagsAttributeValue string, taskIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", taskTypeAttributeValue) + dp.Attributes().PutStr("data_source", taskDataSourceAttributeValue) + dp.Attributes().PutStr("group_id", taskGroupIDAttributeValue) + dp.Attributes().PutStr("tags", taskTagsAttributeValue) + dp.Attributes().PutStr("task_id", taskIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidTaskPendingTime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidTaskPendingTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidTaskPendingTime(cfg MetricConfig) metricApachedruidTaskPendingTime { + m := metricApachedruidTaskPendingTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidTaskRunTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.task.run.time metric with initial data. +func (m *metricApachedruidTaskRunTime) init() { + m.data.SetName("apachedruid.task.run.time") + m.data.SetDescription("Milliseconds taken to run a task.") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidTaskRunTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskTypeAttributeValue string, taskDataSourceAttributeValue string, taskGroupIDAttributeValue string, taskStatusAttributeValue string, taskTagsAttributeValue string, taskIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", taskTypeAttributeValue) + dp.Attributes().PutStr("data_source", taskDataSourceAttributeValue) + dp.Attributes().PutStr("group_id", taskGroupIDAttributeValue) + dp.Attributes().PutStr("task_status", taskStatusAttributeValue) + dp.Attributes().PutStr("tags", taskTagsAttributeValue) + dp.Attributes().PutStr("task_id", taskIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricApachedruidTaskRunTime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidTaskRunTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidTaskRunTime(cfg MetricConfig) metricApachedruidTaskRunTime { + m := metricApachedruidTaskRunTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidTaskRunningCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.task.running.count metric with initial data. +func (m *metricApachedruidTaskRunningCount) init() { + m.data.SetName("apachedruid.task.running.count") + m.data.SetDescription("Number of current running tasks. This metric is only available if the `TaskCountStatsMonitor` module is included.") + m.data.SetUnit("{tasks}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidTaskRunningCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("data_source", taskDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidTaskRunningCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidTaskRunningCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidTaskRunningCount(cfg MetricConfig) metricApachedruidTaskRunningCount { + m := metricApachedruidTaskRunningCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidTaskSegmentAvailabilityWaitTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.task.segment_availability.wait.time metric with initial data. 
+func (m *metricApachedruidTaskSegmentAvailabilityWaitTime) init() { + m.data.SetName("apachedruid.task.segment_availability.wait.time") + m.data.SetDescription("The amount of milliseconds a batch indexing task waited for newly created segments to become available for querying.") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidTaskSegmentAvailabilityWaitTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskTypeAttributeValue string, taskDataSourceAttributeValue string, taskGroupIDAttributeValue string, taskSegmentAvailabilityConfirmedAttributeValue string, taskTagsAttributeValue string, taskIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("task_type", taskTypeAttributeValue) + dp.Attributes().PutStr("data_source", taskDataSourceAttributeValue) + dp.Attributes().PutStr("group_id", taskGroupIDAttributeValue) + dp.Attributes().PutStr("segment_availability_confirmed", taskSegmentAvailabilityConfirmedAttributeValue) + dp.Attributes().PutStr("tags", taskTagsAttributeValue) + dp.Attributes().PutStr("task_id", taskIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidTaskSegmentAvailabilityWaitTime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidTaskSegmentAvailabilityWaitTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidTaskSegmentAvailabilityWaitTime(cfg MetricConfig) metricApachedruidTaskSegmentAvailabilityWaitTime { + m := metricApachedruidTaskSegmentAvailabilityWaitTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidTaskSuccessCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.task.success.count metric with initial data. +func (m *metricApachedruidTaskSuccessCount) init() { + m.data.SetName("apachedruid.task.success.count") + m.data.SetDescription("Number of successful tasks per emission period. This metric is only available if the `TaskCountStatsMonitor` module is included.") + m.data.SetUnit("{tasks}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidTaskSuccessCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("data_source", taskDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricApachedruidTaskSuccessCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidTaskSuccessCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidTaskSuccessCount(cfg MetricConfig) metricApachedruidTaskSuccessCount { + m := metricApachedruidTaskSuccessCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidTaskWaitingCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.task.waiting.count metric with initial data. +func (m *metricApachedruidTaskWaitingCount) init() { + m.data.SetName("apachedruid.task.waiting.count") + m.data.SetDescription("Number of current waiting tasks. This metric is only available if the `TaskCountStatsMonitor` module is included.") + m.data.SetUnit("{tasks}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidTaskWaitingCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskDataSourceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("data_source", taskDataSourceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidTaskWaitingCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidTaskWaitingCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidTaskWaitingCount(cfg MetricConfig) metricApachedruidTaskWaitingCount { + m := metricApachedruidTaskWaitingCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidTaskSlotBlacklistedCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.task_slot.blacklisted.count metric with initial data. +func (m *metricApachedruidTaskSlotBlacklistedCount) init() { + m.data.SetName("apachedruid.task_slot.blacklisted.count") + m.data.SetDescription("Number of total task slots in blacklisted Middle Managers and Indexers per emission period. 
This metric is only available if the `TaskSlotCountStatsMonitor` module is included.") + m.data.SetUnit("{slots}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidTaskSlotBlacklistedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskSlotCategoryAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("category", taskSlotCategoryAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidTaskSlotBlacklistedCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidTaskSlotBlacklistedCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidTaskSlotBlacklistedCount(cfg MetricConfig) metricApachedruidTaskSlotBlacklistedCount { + m := metricApachedruidTaskSlotBlacklistedCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidTaskSlotIdleCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.task_slot.idle.count metric with initial data. +func (m *metricApachedruidTaskSlotIdleCount) init() { + m.data.SetName("apachedruid.task_slot.idle.count") + m.data.SetDescription("Number of idle task slots per emission period. This metric is only available if the `TaskSlotCountStatsMonitor` module is included.") + m.data.SetUnit("{slots}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidTaskSlotIdleCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskSlotCategoryAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("category", taskSlotCategoryAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidTaskSlotIdleCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricApachedruidTaskSlotIdleCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidTaskSlotIdleCount(cfg MetricConfig) metricApachedruidTaskSlotIdleCount { + m := metricApachedruidTaskSlotIdleCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidTaskSlotLazyCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.task_slot.lazy.count metric with initial data. +func (m *metricApachedruidTaskSlotLazyCount) init() { + m.data.SetName("apachedruid.task_slot.lazy.count") + m.data.SetDescription("Number of total task slots in lazy marked Middle Managers and Indexers per emission period. This metric is only available if the `TaskSlotCountStatsMonitor` module is included.") + m.data.SetUnit("{slots}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidTaskSlotLazyCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskSlotCategoryAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("category", taskSlotCategoryAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidTaskSlotLazyCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidTaskSlotLazyCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidTaskSlotLazyCount(cfg MetricConfig) metricApachedruidTaskSlotLazyCount { + m := metricApachedruidTaskSlotLazyCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidTaskSlotTotalCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.task_slot.total.count metric with initial data. +func (m *metricApachedruidTaskSlotTotalCount) init() { + m.data.SetName("apachedruid.task_slot.total.count") + m.data.SetDescription("Number of total task slots per emission period. 
This metric is only available if the `TaskSlotCountStatsMonitor` module is included.") + m.data.SetUnit("{slots}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidTaskSlotTotalCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskSlotCategoryAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("category", taskSlotCategoryAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidTaskSlotTotalCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidTaskSlotTotalCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidTaskSlotTotalCount(cfg MetricConfig) metricApachedruidTaskSlotTotalCount { + m := metricApachedruidTaskSlotTotalCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidTaskSlotUsedCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.task_slot.used.count metric with initial data. +func (m *metricApachedruidTaskSlotUsedCount) init() { + m.data.SetName("apachedruid.task_slot.used.count") + m.data.SetDescription("Number of busy task slots per emission period. This metric is only available if the `TaskSlotCountStatsMonitor` module is included.") + m.data.SetUnit("{slots}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidTaskSlotUsedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskSlotCategoryAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("category", taskSlotCategoryAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidTaskSlotUsedCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricApachedruidTaskSlotUsedCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidTaskSlotUsedCount(cfg MetricConfig) metricApachedruidTaskSlotUsedCount { + m := metricApachedruidTaskSlotUsedCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidTierHistoricalCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.tier.historical.count metric with initial data. +func (m *metricApachedruidTierHistoricalCount) init() { + m.data.SetName("apachedruid.tier.historical.count") + m.data.SetDescription("Number of available historical nodes in each tier.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidTierHistoricalCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, tierAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("tier", tierAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidTierHistoricalCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidTierHistoricalCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidTierHistoricalCount(cfg MetricConfig) metricApachedruidTierHistoricalCount { + m := metricApachedruidTierHistoricalCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidTierReplicationFactor struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.tier.replication.factor metric with initial data. +func (m *metricApachedruidTierReplicationFactor) init() { + m.data.SetName("apachedruid.tier.replication.factor") + m.data.SetDescription("Configured maximum replication factor in each tier.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidTierReplicationFactor) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, tierAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("tier", tierAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
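Tier-scoped gauges such as apachedruid.tier.historical.count record one data point per tier, keyed by the `tier` attribute. Illustrative sketch only (the tier name below is merely Druid's default tier, used as a placeholder):

func exampleRecordTierHistoricalCount(now pcommon.Timestamp, nodes int64, out pmetric.MetricSlice) {
	m := newMetricApachedruidTierHistoricalCount(MetricConfig{Enabled: true})
	m.recordDataPoint(now, now, nodes, "_default_tier") // tier attribute
	m.emit(out)
}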
+func (m *metricApachedruidTierReplicationFactor) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidTierReplicationFactor) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidTierReplicationFactor(cfg MetricConfig) metricApachedruidTierReplicationFactor { + m := metricApachedruidTierReplicationFactor{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidTierRequiredCapacity struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.tier.required.capacity metric with initial data. +func (m *metricApachedruidTierRequiredCapacity) init() { + m.data.SetName("apachedruid.tier.required.capacity") + m.data.SetDescription("Total capacity in bytes required in each tier.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidTierRequiredCapacity) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, tierAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("tier", tierAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidTierRequiredCapacity) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidTierRequiredCapacity) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidTierRequiredCapacity(cfg MetricConfig) metricApachedruidTierRequiredCapacity { + m := metricApachedruidTierRequiredCapacity{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidTierTotalCapacity struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.tier.total.capacity metric with initial data. 
+func (m *metricApachedruidTierTotalCapacity) init() { + m.data.SetName("apachedruid.tier.total.capacity") + m.data.SetDescription("Total capacity in bytes available in each tier.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidTierTotalCapacity) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, tierAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("tier", tierAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidTierTotalCapacity) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidTierTotalCapacity) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidTierTotalCapacity(cfg MetricConfig) metricApachedruidTierTotalCapacity { + m := metricApachedruidTierTotalCapacity{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidWorkerTaskFailedCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.worker.task.failed.count metric with initial data. +func (m *metricApachedruidWorkerTaskFailedCount) init() { + m.data.SetName("apachedruid.worker.task.failed.count") + m.data.SetDescription("Number of failed tasks run on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included, and is only supported for Middle Manager nodes.") + m.data.SetUnit("{tasks}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidWorkerTaskFailedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, workerCategoryAttributeValue string, workerVersionAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("category", workerCategoryAttributeValue) + dp.Attributes().PutStr("worker_version", workerVersionAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidWorkerTaskFailedCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricApachedruidWorkerTaskFailedCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidWorkerTaskFailedCount(cfg MetricConfig) metricApachedruidWorkerTaskFailedCount { + m := metricApachedruidWorkerTaskFailedCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidWorkerTaskSuccessCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.worker.task.success.count metric with initial data. +func (m *metricApachedruidWorkerTaskSuccessCount) init() { + m.data.SetName("apachedruid.worker.task.success.count") + m.data.SetDescription("Number of successful tasks run on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included, and is only supported for Middle Manager nodes.") + m.data.SetUnit("{tasks}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidWorkerTaskSuccessCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, workerCategoryAttributeValue string, workerVersionAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("category", workerCategoryAttributeValue) + dp.Attributes().PutStr("worker_version", workerVersionAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidWorkerTaskSuccessCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidWorkerTaskSuccessCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidWorkerTaskSuccessCount(cfg MetricConfig) metricApachedruidWorkerTaskSuccessCount { + m := metricApachedruidWorkerTaskSuccessCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidWorkerTaskSlotIdleCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.worker.task_slot.idle.count metric with initial data. +func (m *metricApachedruidWorkerTaskSlotIdleCount) init() { + m.data.SetName("apachedruid.worker.task_slot.idle.count") + m.data.SetDescription("Number of idle task slots on the reporting worker per emission period. 
This metric is only available if the `WorkerTaskCountStatsMonitor` module is included, and is only supported for Middle Manager nodes.") + m.data.SetUnit("{slots}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidWorkerTaskSlotIdleCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, workerCategoryAttributeValue string, workerVersionAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("category", workerCategoryAttributeValue) + dp.Attributes().PutStr("worker_version", workerVersionAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidWorkerTaskSlotIdleCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidWorkerTaskSlotIdleCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidWorkerTaskSlotIdleCount(cfg MetricConfig) metricApachedruidWorkerTaskSlotIdleCount { + m := metricApachedruidWorkerTaskSlotIdleCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidWorkerTaskSlotTotalCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.worker.task_slot.total.count metric with initial data. +func (m *metricApachedruidWorkerTaskSlotTotalCount) init() { + m.data.SetName("apachedruid.worker.task_slot.total.count") + m.data.SetDescription("Number of total task slots on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included.") + m.data.SetUnit("{slots}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidWorkerTaskSlotTotalCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, workerCategoryAttributeValue string, workerVersionAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("category", workerCategoryAttributeValue) + dp.Attributes().PutStr("worker_version", workerVersionAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidWorkerTaskSlotTotalCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricApachedruidWorkerTaskSlotTotalCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidWorkerTaskSlotTotalCount(cfg MetricConfig) metricApachedruidWorkerTaskSlotTotalCount { + m := metricApachedruidWorkerTaskSlotTotalCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidWorkerTaskSlotUsedCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.worker.task_slot.used.count metric with initial data. +func (m *metricApachedruidWorkerTaskSlotUsedCount) init() { + m.data.SetName("apachedruid.worker.task_slot.used.count") + m.data.SetDescription("Number of busy task slots on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included.") + m.data.SetUnit("{slots}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricApachedruidWorkerTaskSlotUsedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, workerCategoryAttributeValue string, workerVersionAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("category", workerCategoryAttributeValue) + dp.Attributes().PutStr("worker_version", workerVersionAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidWorkerTaskSlotUsedCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidWorkerTaskSlotUsedCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidWorkerTaskSlotUsedCount(cfg MetricConfig) metricApachedruidWorkerTaskSlotUsedCount { + m := metricApachedruidWorkerTaskSlotUsedCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidZkConnected struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.zk.connected metric with initial data. +func (m *metricApachedruidZkConnected) init() { + m.data.SetName("apachedruid.zk.connected") + m.data.SetDescription("Indicator of connection status. `1` for connected, `0` for disconnected. 
Emitted once per monitor period.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidZkConnected) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidZkConnected) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidZkConnected) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidZkConnected(cfg MetricConfig) metricApachedruidZkConnected { + m := metricApachedruidZkConnected{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricApachedruidZkReconnectTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills apachedruid.zk.reconnect.time metric with initial data. +func (m *metricApachedruidZkReconnectTime) init() { + m.data.SetName("apachedruid.zk.reconnect.time") + m.data.SetDescription("Amount of time, in milliseconds, that a server was disconnected from ZooKeeper before reconnecting. Emitted on reconnection. Not emitted if connection to ZooKeeper is permanently lost, because in this case, there is no reconnection.") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() +} + +func (m *metricApachedruidZkReconnectTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricApachedruidZkReconnectTime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricApachedruidZkReconnectTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricApachedruidZkReconnectTime(cfg MetricConfig) metricApachedruidZkReconnectTime { + m := metricApachedruidZkReconnectTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations +// required to produce metric representation defined in metadata and user config. +type MetricsBuilder struct { + config MetricsBuilderConfig // config of the metrics builder. + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. 
+ metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. + buildInfo component.BuildInfo // contains version information. + metricApachedruidCompactSegmentAnalyzerFetchAndProcessMillis metricApachedruidCompactSegmentAnalyzerFetchAndProcessMillis + metricApachedruidCompactTaskCount metricApachedruidCompactTaskCount + metricApachedruidCompactTaskAvailableSlotCount metricApachedruidCompactTaskAvailableSlotCount + metricApachedruidCompactTaskMaxSlotCount metricApachedruidCompactTaskMaxSlotCount + metricApachedruidCoordinatorGlobalTime metricApachedruidCoordinatorGlobalTime + metricApachedruidCoordinatorTime metricApachedruidCoordinatorTime + metricApachedruidIngestBytesReceived metricApachedruidIngestBytesReceived + metricApachedruidIngestCount metricApachedruidIngestCount + metricApachedruidIngestEventsBuffered metricApachedruidIngestEventsBuffered + metricApachedruidIngestEventsDuplicate metricApachedruidIngestEventsDuplicate + metricApachedruidIngestEventsMessageGap metricApachedruidIngestEventsMessageGap + metricApachedruidIngestEventsProcessed metricApachedruidIngestEventsProcessed + metricApachedruidIngestEventsProcessedWithError metricApachedruidIngestEventsProcessedWithError + metricApachedruidIngestEventsThrownAway metricApachedruidIngestEventsThrownAway + metricApachedruidIngestEventsUnparseable metricApachedruidIngestEventsUnparseable + metricApachedruidIngestHandoffCount metricApachedruidIngestHandoffCount + metricApachedruidIngestHandoffFailed metricApachedruidIngestHandoffFailed + metricApachedruidIngestHandoffTime metricApachedruidIngestHandoffTime + metricApachedruidIngestInputBytes metricApachedruidIngestInputBytes + metricApachedruidIngestKafkaAvgLag metricApachedruidIngestKafkaAvgLag + metricApachedruidIngestKafkaLag metricApachedruidIngestKafkaLag + metricApachedruidIngestKafkaMaxLag metricApachedruidIngestKafkaMaxLag + metricApachedruidIngestKafkaPartitionLag metricApachedruidIngestKafkaPartitionLag + metricApachedruidIngestKinesisAvgLagTime metricApachedruidIngestKinesisAvgLagTime + metricApachedruidIngestKinesisLagTime metricApachedruidIngestKinesisLagTime + metricApachedruidIngestKinesisMaxLagTime metricApachedruidIngestKinesisMaxLagTime + metricApachedruidIngestKinesisPartitionLagTime metricApachedruidIngestKinesisPartitionLagTime + metricApachedruidIngestMergeCPU metricApachedruidIngestMergeCPU + metricApachedruidIngestMergeTime metricApachedruidIngestMergeTime + metricApachedruidIngestNoticesQueueSize metricApachedruidIngestNoticesQueueSize + metricApachedruidIngestNoticesTime metricApachedruidIngestNoticesTime + metricApachedruidIngestPauseTime metricApachedruidIngestPauseTime + metricApachedruidIngestPersistsBackPressure metricApachedruidIngestPersistsBackPressure + metricApachedruidIngestPersistsCount metricApachedruidIngestPersistsCount + metricApachedruidIngestPersistsCPU metricApachedruidIngestPersistsCPU + metricApachedruidIngestPersistsFailed metricApachedruidIngestPersistsFailed + metricApachedruidIngestPersistsTime metricApachedruidIngestPersistsTime + metricApachedruidIngestRowsOutput metricApachedruidIngestRowsOutput + metricApachedruidIngestSegmentsCount metricApachedruidIngestSegmentsCount + metricApachedruidIngestShuffleBytes metricApachedruidIngestShuffleBytes + metricApachedruidIngestShuffleRequests metricApachedruidIngestShuffleRequests + metricApachedruidIngestSinkCount metricApachedruidIngestSinkCount + metricApachedruidIngestTombstonesCount metricApachedruidIngestTombstonesCount + metricApachedruidIntervalCompactedCount 
metricApachedruidIntervalCompactedCount + metricApachedruidIntervalSkipCompactCount metricApachedruidIntervalSkipCompactCount + metricApachedruidIntervalWaitCompactCount metricApachedruidIntervalWaitCompactCount + metricApachedruidJettyNumOpenConnections metricApachedruidJettyNumOpenConnections + metricApachedruidJettyThreadPoolBusy metricApachedruidJettyThreadPoolBusy + metricApachedruidJettyThreadPoolIdle metricApachedruidJettyThreadPoolIdle + metricApachedruidJettyThreadPoolIsLowOnThreads metricApachedruidJettyThreadPoolIsLowOnThreads + metricApachedruidJettyThreadPoolMax metricApachedruidJettyThreadPoolMax + metricApachedruidJettyThreadPoolMin metricApachedruidJettyThreadPoolMin + metricApachedruidJettyThreadPoolQueueSize metricApachedruidJettyThreadPoolQueueSize + metricApachedruidJettyThreadPoolTotal metricApachedruidJettyThreadPoolTotal + metricApachedruidJvmBufferpoolCapacity metricApachedruidJvmBufferpoolCapacity + metricApachedruidJvmBufferpoolCount metricApachedruidJvmBufferpoolCount + metricApachedruidJvmBufferpoolUsed metricApachedruidJvmBufferpoolUsed + metricApachedruidJvmGcCount metricApachedruidJvmGcCount + metricApachedruidJvmGcCPU metricApachedruidJvmGcCPU + metricApachedruidJvmMemCommitted metricApachedruidJvmMemCommitted + metricApachedruidJvmMemInit metricApachedruidJvmMemInit + metricApachedruidJvmMemMax metricApachedruidJvmMemMax + metricApachedruidJvmMemUsed metricApachedruidJvmMemUsed + metricApachedruidJvmPoolCommitted metricApachedruidJvmPoolCommitted + metricApachedruidJvmPoolInit metricApachedruidJvmPoolInit + metricApachedruidJvmPoolMax metricApachedruidJvmPoolMax + metricApachedruidJvmPoolUsed metricApachedruidJvmPoolUsed + metricApachedruidKillPendingSegmentsCount metricApachedruidKillPendingSegmentsCount + metricApachedruidKillTaskCount metricApachedruidKillTaskCount + metricApachedruidKillTaskAvailableSlotCount metricApachedruidKillTaskAvailableSlotCount + metricApachedruidKillTaskMaxSlotCount metricApachedruidKillTaskMaxSlotCount + metricApachedruidMergeBufferPendingRequests metricApachedruidMergeBufferPendingRequests + metricApachedruidMetadataKillAuditCount metricApachedruidMetadataKillAuditCount + metricApachedruidMetadataKillCompactionCount metricApachedruidMetadataKillCompactionCount + metricApachedruidMetadataKillDatasourceCount metricApachedruidMetadataKillDatasourceCount + metricApachedruidMetadataKillRuleCount metricApachedruidMetadataKillRuleCount + metricApachedruidMetadataKillSupervisorCount metricApachedruidMetadataKillSupervisorCount + metricApachedruidMetadatacacheInitTime metricApachedruidMetadatacacheInitTime + metricApachedruidMetadatacacheRefreshCount metricApachedruidMetadatacacheRefreshCount + metricApachedruidMetadatacacheRefreshTime metricApachedruidMetadatacacheRefreshTime + metricApachedruidQueryByteLimitExceededCount metricApachedruidQueryByteLimitExceededCount + metricApachedruidQueryBytes metricApachedruidQueryBytes + metricApachedruidQueryCacheDeltaAverageBytes metricApachedruidQueryCacheDeltaAverageBytes + metricApachedruidQueryCacheDeltaErrors metricApachedruidQueryCacheDeltaErrors + metricApachedruidQueryCacheDeltaEvictions metricApachedruidQueryCacheDeltaEvictions + metricApachedruidQueryCacheDeltaHitRate metricApachedruidQueryCacheDeltaHitRate + metricApachedruidQueryCacheDeltaHits metricApachedruidQueryCacheDeltaHits + metricApachedruidQueryCacheDeltaMisses metricApachedruidQueryCacheDeltaMisses + metricApachedruidQueryCacheDeltaNumEntries metricApachedruidQueryCacheDeltaNumEntries + 
metricApachedruidQueryCacheDeltaPutError metricApachedruidQueryCacheDeltaPutError + metricApachedruidQueryCacheDeltaPutOk metricApachedruidQueryCacheDeltaPutOk + metricApachedruidQueryCacheDeltaPutOversized metricApachedruidQueryCacheDeltaPutOversized + metricApachedruidQueryCacheDeltaSizeBytes metricApachedruidQueryCacheDeltaSizeBytes + metricApachedruidQueryCacheDeltaTimeouts metricApachedruidQueryCacheDeltaTimeouts + metricApachedruidQueryCacheMemcachedDelta metricApachedruidQueryCacheMemcachedDelta + metricApachedruidQueryCacheMemcachedTotal metricApachedruidQueryCacheMemcachedTotal + metricApachedruidQueryCacheTotalAverageBytes metricApachedruidQueryCacheTotalAverageBytes + metricApachedruidQueryCacheTotalErrors metricApachedruidQueryCacheTotalErrors + metricApachedruidQueryCacheTotalEvictions metricApachedruidQueryCacheTotalEvictions + metricApachedruidQueryCacheTotalHitRate metricApachedruidQueryCacheTotalHitRate + metricApachedruidQueryCacheTotalHits metricApachedruidQueryCacheTotalHits + metricApachedruidQueryCacheTotalMisses metricApachedruidQueryCacheTotalMisses + metricApachedruidQueryCacheTotalNumEntries metricApachedruidQueryCacheTotalNumEntries + metricApachedruidQueryCacheTotalPutError metricApachedruidQueryCacheTotalPutError + metricApachedruidQueryCacheTotalPutOk metricApachedruidQueryCacheTotalPutOk + metricApachedruidQueryCacheTotalPutOversized metricApachedruidQueryCacheTotalPutOversized + metricApachedruidQueryCacheTotalSizeBytes metricApachedruidQueryCacheTotalSizeBytes + metricApachedruidQueryCacheTotalTimeouts metricApachedruidQueryCacheTotalTimeouts + metricApachedruidQueryCount metricApachedruidQueryCount + metricApachedruidQueryCPUTime metricApachedruidQueryCPUTime + metricApachedruidQueryFailedCount metricApachedruidQueryFailedCount + metricApachedruidQueryInterruptedCount metricApachedruidQueryInterruptedCount + metricApachedruidQueryNodeBackpressure metricApachedruidQueryNodeBackpressure + metricApachedruidQueryNodeBytes metricApachedruidQueryNodeBytes + metricApachedruidQueryNodeTime metricApachedruidQueryNodeTime + metricApachedruidQueryNodeTtfb metricApachedruidQueryNodeTtfb + metricApachedruidQueryPriority metricApachedruidQueryPriority + metricApachedruidQueryRowLimitExceededCount metricApachedruidQueryRowLimitExceededCount + metricApachedruidQuerySegmentTime metricApachedruidQuerySegmentTime + metricApachedruidQuerySegmentAndCacheTime metricApachedruidQuerySegmentAndCacheTime + metricApachedruidQuerySegmentsCount metricApachedruidQuerySegmentsCount + metricApachedruidQuerySuccessCount metricApachedruidQuerySuccessCount + metricApachedruidQueryTime metricApachedruidQueryTime + metricApachedruidQueryTimeoutCount metricApachedruidQueryTimeoutCount + metricApachedruidQueryWaitTime metricApachedruidQueryWaitTime + metricApachedruidSegmentAddedBytes metricApachedruidSegmentAddedBytes + metricApachedruidSegmentAssignSkippedCount metricApachedruidSegmentAssignSkippedCount + metricApachedruidSegmentAssignedCount metricApachedruidSegmentAssignedCount + metricApachedruidSegmentCompactedBytes metricApachedruidSegmentCompactedBytes + metricApachedruidSegmentCompactedCount metricApachedruidSegmentCompactedCount + metricApachedruidSegmentCount metricApachedruidSegmentCount + metricApachedruidSegmentDeletedCount metricApachedruidSegmentDeletedCount + metricApachedruidSegmentDropQueueCount metricApachedruidSegmentDropQueueCount + metricApachedruidSegmentDropSkippedCount metricApachedruidSegmentDropSkippedCount + metricApachedruidSegmentDroppedCount 
metricApachedruidSegmentDroppedCount + metricApachedruidSegmentLoadQueueAssigned metricApachedruidSegmentLoadQueueAssigned + metricApachedruidSegmentLoadQueueCancelled metricApachedruidSegmentLoadQueueCancelled + metricApachedruidSegmentLoadQueueCount metricApachedruidSegmentLoadQueueCount + metricApachedruidSegmentLoadQueueFailed metricApachedruidSegmentLoadQueueFailed + metricApachedruidSegmentLoadQueueSize metricApachedruidSegmentLoadQueueSize + metricApachedruidSegmentLoadQueueSuccess metricApachedruidSegmentLoadQueueSuccess + metricApachedruidSegmentMax metricApachedruidSegmentMax + metricApachedruidSegmentMoveSkippedCount metricApachedruidSegmentMoveSkippedCount + metricApachedruidSegmentMovedBytes metricApachedruidSegmentMovedBytes + metricApachedruidSegmentMovedCount metricApachedruidSegmentMovedCount + metricApachedruidSegmentNukedBytes metricApachedruidSegmentNukedBytes + metricApachedruidSegmentOverShadowedCount metricApachedruidSegmentOverShadowedCount + metricApachedruidSegmentPendingDelete metricApachedruidSegmentPendingDelete + metricApachedruidSegmentRowCountAvg metricApachedruidSegmentRowCountAvg + metricApachedruidSegmentRowCountRangeCount metricApachedruidSegmentRowCountRangeCount + metricApachedruidSegmentScanActive metricApachedruidSegmentScanActive + metricApachedruidSegmentScanPending metricApachedruidSegmentScanPending + metricApachedruidSegmentSize metricApachedruidSegmentSize + metricApachedruidSegmentSkipCompactBytes metricApachedruidSegmentSkipCompactBytes + metricApachedruidSegmentSkipCompactCount metricApachedruidSegmentSkipCompactCount + metricApachedruidSegmentUnavailableCount metricApachedruidSegmentUnavailableCount + metricApachedruidSegmentUnderReplicatedCount metricApachedruidSegmentUnderReplicatedCount + metricApachedruidSegmentUnneededCount metricApachedruidSegmentUnneededCount + metricApachedruidSegmentUsed metricApachedruidSegmentUsed + metricApachedruidSegmentUsedPercent metricApachedruidSegmentUsedPercent + metricApachedruidSegmentWaitCompactBytes metricApachedruidSegmentWaitCompactBytes + metricApachedruidSegmentWaitCompactCount metricApachedruidSegmentWaitCompactCount + metricApachedruidServerviewInitTime metricApachedruidServerviewInitTime + metricApachedruidServerviewSyncHealthy metricApachedruidServerviewSyncHealthy + metricApachedruidServerviewSyncUnstableTime metricApachedruidServerviewSyncUnstableTime + metricApachedruidSQLQueryBytes metricApachedruidSQLQueryBytes + metricApachedruidSQLQueryPlanningTimeMs metricApachedruidSQLQueryPlanningTimeMs + metricApachedruidSQLQueryTime metricApachedruidSQLQueryTime + metricApachedruidSubqueryByteLimitCount metricApachedruidSubqueryByteLimitCount + metricApachedruidSubqueryFallbackCount metricApachedruidSubqueryFallbackCount + metricApachedruidSubqueryFallbackInsufficientTypeCount metricApachedruidSubqueryFallbackInsufficientTypeCount + metricApachedruidSubqueryFallbackUnknownReasonCount metricApachedruidSubqueryFallbackUnknownReasonCount + metricApachedruidSubqueryRowLimitCount metricApachedruidSubqueryRowLimitCount + metricApachedruidSysCPU metricApachedruidSysCPU + metricApachedruidSysDiskQueue metricApachedruidSysDiskQueue + metricApachedruidSysDiskReadCount metricApachedruidSysDiskReadCount + metricApachedruidSysDiskReadSize metricApachedruidSysDiskReadSize + metricApachedruidSysDiskTransferTime metricApachedruidSysDiskTransferTime + metricApachedruidSysDiskWriteCount metricApachedruidSysDiskWriteCount + metricApachedruidSysDiskWriteSize metricApachedruidSysDiskWriteSize + 
metricApachedruidSysFsFilesCount metricApachedruidSysFsFilesCount + metricApachedruidSysFsFilesFree metricApachedruidSysFsFilesFree + metricApachedruidSysFsMax metricApachedruidSysFsMax + metricApachedruidSysFsUsed metricApachedruidSysFsUsed + metricApachedruidSysLa1 metricApachedruidSysLa1 + metricApachedruidSysLa15 metricApachedruidSysLa15 + metricApachedruidSysLa5 metricApachedruidSysLa5 + metricApachedruidSysMemFree metricApachedruidSysMemFree + metricApachedruidSysMemMax metricApachedruidSysMemMax + metricApachedruidSysMemUsed metricApachedruidSysMemUsed + metricApachedruidSysNetReadDropped metricApachedruidSysNetReadDropped + metricApachedruidSysNetReadErrors metricApachedruidSysNetReadErrors + metricApachedruidSysNetReadPackets metricApachedruidSysNetReadPackets + metricApachedruidSysNetReadSize metricApachedruidSysNetReadSize + metricApachedruidSysNetWriteCollisions metricApachedruidSysNetWriteCollisions + metricApachedruidSysNetWriteErrors metricApachedruidSysNetWriteErrors + metricApachedruidSysNetWritePackets metricApachedruidSysNetWritePackets + metricApachedruidSysNetWriteSize metricApachedruidSysNetWriteSize + metricApachedruidSysStorageUsed metricApachedruidSysStorageUsed + metricApachedruidSysSwapFree metricApachedruidSysSwapFree + metricApachedruidSysSwapMax metricApachedruidSysSwapMax + metricApachedruidSysSwapPageIn metricApachedruidSysSwapPageIn + metricApachedruidSysSwapPageOut metricApachedruidSysSwapPageOut + metricApachedruidSysTcpv4ActiveOpens metricApachedruidSysTcpv4ActiveOpens + metricApachedruidSysTcpv4AttemptFails metricApachedruidSysTcpv4AttemptFails + metricApachedruidSysTcpv4EstabResets metricApachedruidSysTcpv4EstabResets + metricApachedruidSysTcpv4InErrs metricApachedruidSysTcpv4InErrs + metricApachedruidSysTcpv4InSegs metricApachedruidSysTcpv4InSegs + metricApachedruidSysTcpv4OutRsts metricApachedruidSysTcpv4OutRsts + metricApachedruidSysTcpv4OutSegs metricApachedruidSysTcpv4OutSegs + metricApachedruidSysTcpv4PassiveOpens metricApachedruidSysTcpv4PassiveOpens + metricApachedruidSysTcpv4RetransSegs metricApachedruidSysTcpv4RetransSegs + metricApachedruidSysUptime metricApachedruidSysUptime + metricApachedruidTaskActionBatchAttempts metricApachedruidTaskActionBatchAttempts + metricApachedruidTaskActionBatchQueueTime metricApachedruidTaskActionBatchQueueTime + metricApachedruidTaskActionBatchRunTime metricApachedruidTaskActionBatchRunTime + metricApachedruidTaskActionBatchSize metricApachedruidTaskActionBatchSize + metricApachedruidTaskActionFailedCount metricApachedruidTaskActionFailedCount + metricApachedruidTaskActionLogTime metricApachedruidTaskActionLogTime + metricApachedruidTaskActionRunTime metricApachedruidTaskActionRunTime + metricApachedruidTaskActionSuccessCount metricApachedruidTaskActionSuccessCount + metricApachedruidTaskFailedCount metricApachedruidTaskFailedCount + metricApachedruidTaskPendingCount metricApachedruidTaskPendingCount + metricApachedruidTaskPendingTime metricApachedruidTaskPendingTime + metricApachedruidTaskRunTime metricApachedruidTaskRunTime + metricApachedruidTaskRunningCount metricApachedruidTaskRunningCount + metricApachedruidTaskSegmentAvailabilityWaitTime metricApachedruidTaskSegmentAvailabilityWaitTime + metricApachedruidTaskSuccessCount metricApachedruidTaskSuccessCount + metricApachedruidTaskWaitingCount metricApachedruidTaskWaitingCount + metricApachedruidTaskSlotBlacklistedCount metricApachedruidTaskSlotBlacklistedCount + metricApachedruidTaskSlotIdleCount metricApachedruidTaskSlotIdleCount + 
metricApachedruidTaskSlotLazyCount metricApachedruidTaskSlotLazyCount + metricApachedruidTaskSlotTotalCount metricApachedruidTaskSlotTotalCount + metricApachedruidTaskSlotUsedCount metricApachedruidTaskSlotUsedCount + metricApachedruidTierHistoricalCount metricApachedruidTierHistoricalCount + metricApachedruidTierReplicationFactor metricApachedruidTierReplicationFactor + metricApachedruidTierRequiredCapacity metricApachedruidTierRequiredCapacity + metricApachedruidTierTotalCapacity metricApachedruidTierTotalCapacity + metricApachedruidWorkerTaskFailedCount metricApachedruidWorkerTaskFailedCount + metricApachedruidWorkerTaskSuccessCount metricApachedruidWorkerTaskSuccessCount + metricApachedruidWorkerTaskSlotIdleCount metricApachedruidWorkerTaskSlotIdleCount + metricApachedruidWorkerTaskSlotTotalCount metricApachedruidWorkerTaskSlotTotalCount + metricApachedruidWorkerTaskSlotUsedCount metricApachedruidWorkerTaskSlotUsedCount + metricApachedruidZkConnected metricApachedruidZkConnected + metricApachedruidZkReconnectTime metricApachedruidZkReconnectTime +} + +// metricBuilderOption applies changes to default metrics builder. +type metricBuilderOption func(*MetricsBuilder) + +// WithStartTime sets startTime on the metrics builder. +func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { + return func(mb *MetricsBuilder) { + mb.startTime = startTime + } +} + +func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSettings, options ...metricBuilderOption) *MetricsBuilder { + mb := &MetricsBuilder{ + config: mbc, + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), + buildInfo: settings.BuildInfo, + metricApachedruidCompactSegmentAnalyzerFetchAndProcessMillis: newMetricApachedruidCompactSegmentAnalyzerFetchAndProcessMillis(mbc.Metrics.ApachedruidCompactSegmentAnalyzerFetchAndProcessMillis), + metricApachedruidCompactTaskCount: newMetricApachedruidCompactTaskCount(mbc.Metrics.ApachedruidCompactTaskCount), + metricApachedruidCompactTaskAvailableSlotCount: newMetricApachedruidCompactTaskAvailableSlotCount(mbc.Metrics.ApachedruidCompactTaskAvailableSlotCount), + metricApachedruidCompactTaskMaxSlotCount: newMetricApachedruidCompactTaskMaxSlotCount(mbc.Metrics.ApachedruidCompactTaskMaxSlotCount), + metricApachedruidCoordinatorGlobalTime: newMetricApachedruidCoordinatorGlobalTime(mbc.Metrics.ApachedruidCoordinatorGlobalTime), + metricApachedruidCoordinatorTime: newMetricApachedruidCoordinatorTime(mbc.Metrics.ApachedruidCoordinatorTime), + metricApachedruidIngestBytesReceived: newMetricApachedruidIngestBytesReceived(mbc.Metrics.ApachedruidIngestBytesReceived), + metricApachedruidIngestCount: newMetricApachedruidIngestCount(mbc.Metrics.ApachedruidIngestCount), + metricApachedruidIngestEventsBuffered: newMetricApachedruidIngestEventsBuffered(mbc.Metrics.ApachedruidIngestEventsBuffered), + metricApachedruidIngestEventsDuplicate: newMetricApachedruidIngestEventsDuplicate(mbc.Metrics.ApachedruidIngestEventsDuplicate), + metricApachedruidIngestEventsMessageGap: newMetricApachedruidIngestEventsMessageGap(mbc.Metrics.ApachedruidIngestEventsMessageGap), + metricApachedruidIngestEventsProcessed: newMetricApachedruidIngestEventsProcessed(mbc.Metrics.ApachedruidIngestEventsProcessed), + metricApachedruidIngestEventsProcessedWithError: newMetricApachedruidIngestEventsProcessedWithError(mbc.Metrics.ApachedruidIngestEventsProcessedWithError), + metricApachedruidIngestEventsThrownAway: 
newMetricApachedruidIngestEventsThrownAway(mbc.Metrics.ApachedruidIngestEventsThrownAway), + metricApachedruidIngestEventsUnparseable: newMetricApachedruidIngestEventsUnparseable(mbc.Metrics.ApachedruidIngestEventsUnparseable), + metricApachedruidIngestHandoffCount: newMetricApachedruidIngestHandoffCount(mbc.Metrics.ApachedruidIngestHandoffCount), + metricApachedruidIngestHandoffFailed: newMetricApachedruidIngestHandoffFailed(mbc.Metrics.ApachedruidIngestHandoffFailed), + metricApachedruidIngestHandoffTime: newMetricApachedruidIngestHandoffTime(mbc.Metrics.ApachedruidIngestHandoffTime), + metricApachedruidIngestInputBytes: newMetricApachedruidIngestInputBytes(mbc.Metrics.ApachedruidIngestInputBytes), + metricApachedruidIngestKafkaAvgLag: newMetricApachedruidIngestKafkaAvgLag(mbc.Metrics.ApachedruidIngestKafkaAvgLag), + metricApachedruidIngestKafkaLag: newMetricApachedruidIngestKafkaLag(mbc.Metrics.ApachedruidIngestKafkaLag), + metricApachedruidIngestKafkaMaxLag: newMetricApachedruidIngestKafkaMaxLag(mbc.Metrics.ApachedruidIngestKafkaMaxLag), + metricApachedruidIngestKafkaPartitionLag: newMetricApachedruidIngestKafkaPartitionLag(mbc.Metrics.ApachedruidIngestKafkaPartitionLag), + metricApachedruidIngestKinesisAvgLagTime: newMetricApachedruidIngestKinesisAvgLagTime(mbc.Metrics.ApachedruidIngestKinesisAvgLagTime), + metricApachedruidIngestKinesisLagTime: newMetricApachedruidIngestKinesisLagTime(mbc.Metrics.ApachedruidIngestKinesisLagTime), + metricApachedruidIngestKinesisMaxLagTime: newMetricApachedruidIngestKinesisMaxLagTime(mbc.Metrics.ApachedruidIngestKinesisMaxLagTime), + metricApachedruidIngestKinesisPartitionLagTime: newMetricApachedruidIngestKinesisPartitionLagTime(mbc.Metrics.ApachedruidIngestKinesisPartitionLagTime), + metricApachedruidIngestMergeCPU: newMetricApachedruidIngestMergeCPU(mbc.Metrics.ApachedruidIngestMergeCPU), + metricApachedruidIngestMergeTime: newMetricApachedruidIngestMergeTime(mbc.Metrics.ApachedruidIngestMergeTime), + metricApachedruidIngestNoticesQueueSize: newMetricApachedruidIngestNoticesQueueSize(mbc.Metrics.ApachedruidIngestNoticesQueueSize), + metricApachedruidIngestNoticesTime: newMetricApachedruidIngestNoticesTime(mbc.Metrics.ApachedruidIngestNoticesTime), + metricApachedruidIngestPauseTime: newMetricApachedruidIngestPauseTime(mbc.Metrics.ApachedruidIngestPauseTime), + metricApachedruidIngestPersistsBackPressure: newMetricApachedruidIngestPersistsBackPressure(mbc.Metrics.ApachedruidIngestPersistsBackPressure), + metricApachedruidIngestPersistsCount: newMetricApachedruidIngestPersistsCount(mbc.Metrics.ApachedruidIngestPersistsCount), + metricApachedruidIngestPersistsCPU: newMetricApachedruidIngestPersistsCPU(mbc.Metrics.ApachedruidIngestPersistsCPU), + metricApachedruidIngestPersistsFailed: newMetricApachedruidIngestPersistsFailed(mbc.Metrics.ApachedruidIngestPersistsFailed), + metricApachedruidIngestPersistsTime: newMetricApachedruidIngestPersistsTime(mbc.Metrics.ApachedruidIngestPersistsTime), + metricApachedruidIngestRowsOutput: newMetricApachedruidIngestRowsOutput(mbc.Metrics.ApachedruidIngestRowsOutput), + metricApachedruidIngestSegmentsCount: newMetricApachedruidIngestSegmentsCount(mbc.Metrics.ApachedruidIngestSegmentsCount), + metricApachedruidIngestShuffleBytes: newMetricApachedruidIngestShuffleBytes(mbc.Metrics.ApachedruidIngestShuffleBytes), + metricApachedruidIngestShuffleRequests: newMetricApachedruidIngestShuffleRequests(mbc.Metrics.ApachedruidIngestShuffleRequests), + metricApachedruidIngestSinkCount: 
newMetricApachedruidIngestSinkCount(mbc.Metrics.ApachedruidIngestSinkCount), + metricApachedruidIngestTombstonesCount: newMetricApachedruidIngestTombstonesCount(mbc.Metrics.ApachedruidIngestTombstonesCount), + metricApachedruidIntervalCompactedCount: newMetricApachedruidIntervalCompactedCount(mbc.Metrics.ApachedruidIntervalCompactedCount), + metricApachedruidIntervalSkipCompactCount: newMetricApachedruidIntervalSkipCompactCount(mbc.Metrics.ApachedruidIntervalSkipCompactCount), + metricApachedruidIntervalWaitCompactCount: newMetricApachedruidIntervalWaitCompactCount(mbc.Metrics.ApachedruidIntervalWaitCompactCount), + metricApachedruidJettyNumOpenConnections: newMetricApachedruidJettyNumOpenConnections(mbc.Metrics.ApachedruidJettyNumOpenConnections), + metricApachedruidJettyThreadPoolBusy: newMetricApachedruidJettyThreadPoolBusy(mbc.Metrics.ApachedruidJettyThreadPoolBusy), + metricApachedruidJettyThreadPoolIdle: newMetricApachedruidJettyThreadPoolIdle(mbc.Metrics.ApachedruidJettyThreadPoolIdle), + metricApachedruidJettyThreadPoolIsLowOnThreads: newMetricApachedruidJettyThreadPoolIsLowOnThreads(mbc.Metrics.ApachedruidJettyThreadPoolIsLowOnThreads), + metricApachedruidJettyThreadPoolMax: newMetricApachedruidJettyThreadPoolMax(mbc.Metrics.ApachedruidJettyThreadPoolMax), + metricApachedruidJettyThreadPoolMin: newMetricApachedruidJettyThreadPoolMin(mbc.Metrics.ApachedruidJettyThreadPoolMin), + metricApachedruidJettyThreadPoolQueueSize: newMetricApachedruidJettyThreadPoolQueueSize(mbc.Metrics.ApachedruidJettyThreadPoolQueueSize), + metricApachedruidJettyThreadPoolTotal: newMetricApachedruidJettyThreadPoolTotal(mbc.Metrics.ApachedruidJettyThreadPoolTotal), + metricApachedruidJvmBufferpoolCapacity: newMetricApachedruidJvmBufferpoolCapacity(mbc.Metrics.ApachedruidJvmBufferpoolCapacity), + metricApachedruidJvmBufferpoolCount: newMetricApachedruidJvmBufferpoolCount(mbc.Metrics.ApachedruidJvmBufferpoolCount), + metricApachedruidJvmBufferpoolUsed: newMetricApachedruidJvmBufferpoolUsed(mbc.Metrics.ApachedruidJvmBufferpoolUsed), + metricApachedruidJvmGcCount: newMetricApachedruidJvmGcCount(mbc.Metrics.ApachedruidJvmGcCount), + metricApachedruidJvmGcCPU: newMetricApachedruidJvmGcCPU(mbc.Metrics.ApachedruidJvmGcCPU), + metricApachedruidJvmMemCommitted: newMetricApachedruidJvmMemCommitted(mbc.Metrics.ApachedruidJvmMemCommitted), + metricApachedruidJvmMemInit: newMetricApachedruidJvmMemInit(mbc.Metrics.ApachedruidJvmMemInit), + metricApachedruidJvmMemMax: newMetricApachedruidJvmMemMax(mbc.Metrics.ApachedruidJvmMemMax), + metricApachedruidJvmMemUsed: newMetricApachedruidJvmMemUsed(mbc.Metrics.ApachedruidJvmMemUsed), + metricApachedruidJvmPoolCommitted: newMetricApachedruidJvmPoolCommitted(mbc.Metrics.ApachedruidJvmPoolCommitted), + metricApachedruidJvmPoolInit: newMetricApachedruidJvmPoolInit(mbc.Metrics.ApachedruidJvmPoolInit), + metricApachedruidJvmPoolMax: newMetricApachedruidJvmPoolMax(mbc.Metrics.ApachedruidJvmPoolMax), + metricApachedruidJvmPoolUsed: newMetricApachedruidJvmPoolUsed(mbc.Metrics.ApachedruidJvmPoolUsed), + metricApachedruidKillPendingSegmentsCount: newMetricApachedruidKillPendingSegmentsCount(mbc.Metrics.ApachedruidKillPendingSegmentsCount), + metricApachedruidKillTaskCount: newMetricApachedruidKillTaskCount(mbc.Metrics.ApachedruidKillTaskCount), + metricApachedruidKillTaskAvailableSlotCount: newMetricApachedruidKillTaskAvailableSlotCount(mbc.Metrics.ApachedruidKillTaskAvailableSlotCount), + metricApachedruidKillTaskMaxSlotCount: 
newMetricApachedruidKillTaskMaxSlotCount(mbc.Metrics.ApachedruidKillTaskMaxSlotCount), + metricApachedruidMergeBufferPendingRequests: newMetricApachedruidMergeBufferPendingRequests(mbc.Metrics.ApachedruidMergeBufferPendingRequests), + metricApachedruidMetadataKillAuditCount: newMetricApachedruidMetadataKillAuditCount(mbc.Metrics.ApachedruidMetadataKillAuditCount), + metricApachedruidMetadataKillCompactionCount: newMetricApachedruidMetadataKillCompactionCount(mbc.Metrics.ApachedruidMetadataKillCompactionCount), + metricApachedruidMetadataKillDatasourceCount: newMetricApachedruidMetadataKillDatasourceCount(mbc.Metrics.ApachedruidMetadataKillDatasourceCount), + metricApachedruidMetadataKillRuleCount: newMetricApachedruidMetadataKillRuleCount(mbc.Metrics.ApachedruidMetadataKillRuleCount), + metricApachedruidMetadataKillSupervisorCount: newMetricApachedruidMetadataKillSupervisorCount(mbc.Metrics.ApachedruidMetadataKillSupervisorCount), + metricApachedruidMetadatacacheInitTime: newMetricApachedruidMetadatacacheInitTime(mbc.Metrics.ApachedruidMetadatacacheInitTime), + metricApachedruidMetadatacacheRefreshCount: newMetricApachedruidMetadatacacheRefreshCount(mbc.Metrics.ApachedruidMetadatacacheRefreshCount), + metricApachedruidMetadatacacheRefreshTime: newMetricApachedruidMetadatacacheRefreshTime(mbc.Metrics.ApachedruidMetadatacacheRefreshTime), + metricApachedruidQueryByteLimitExceededCount: newMetricApachedruidQueryByteLimitExceededCount(mbc.Metrics.ApachedruidQueryByteLimitExceededCount), + metricApachedruidQueryBytes: newMetricApachedruidQueryBytes(mbc.Metrics.ApachedruidQueryBytes), + metricApachedruidQueryCacheDeltaAverageBytes: newMetricApachedruidQueryCacheDeltaAverageBytes(mbc.Metrics.ApachedruidQueryCacheDeltaAverageBytes), + metricApachedruidQueryCacheDeltaErrors: newMetricApachedruidQueryCacheDeltaErrors(mbc.Metrics.ApachedruidQueryCacheDeltaErrors), + metricApachedruidQueryCacheDeltaEvictions: newMetricApachedruidQueryCacheDeltaEvictions(mbc.Metrics.ApachedruidQueryCacheDeltaEvictions), + metricApachedruidQueryCacheDeltaHitRate: newMetricApachedruidQueryCacheDeltaHitRate(mbc.Metrics.ApachedruidQueryCacheDeltaHitRate), + metricApachedruidQueryCacheDeltaHits: newMetricApachedruidQueryCacheDeltaHits(mbc.Metrics.ApachedruidQueryCacheDeltaHits), + metricApachedruidQueryCacheDeltaMisses: newMetricApachedruidQueryCacheDeltaMisses(mbc.Metrics.ApachedruidQueryCacheDeltaMisses), + metricApachedruidQueryCacheDeltaNumEntries: newMetricApachedruidQueryCacheDeltaNumEntries(mbc.Metrics.ApachedruidQueryCacheDeltaNumEntries), + metricApachedruidQueryCacheDeltaPutError: newMetricApachedruidQueryCacheDeltaPutError(mbc.Metrics.ApachedruidQueryCacheDeltaPutError), + metricApachedruidQueryCacheDeltaPutOk: newMetricApachedruidQueryCacheDeltaPutOk(mbc.Metrics.ApachedruidQueryCacheDeltaPutOk), + metricApachedruidQueryCacheDeltaPutOversized: newMetricApachedruidQueryCacheDeltaPutOversized(mbc.Metrics.ApachedruidQueryCacheDeltaPutOversized), + metricApachedruidQueryCacheDeltaSizeBytes: newMetricApachedruidQueryCacheDeltaSizeBytes(mbc.Metrics.ApachedruidQueryCacheDeltaSizeBytes), + metricApachedruidQueryCacheDeltaTimeouts: newMetricApachedruidQueryCacheDeltaTimeouts(mbc.Metrics.ApachedruidQueryCacheDeltaTimeouts), + metricApachedruidQueryCacheMemcachedDelta: newMetricApachedruidQueryCacheMemcachedDelta(mbc.Metrics.ApachedruidQueryCacheMemcachedDelta), + metricApachedruidQueryCacheMemcachedTotal: newMetricApachedruidQueryCacheMemcachedTotal(mbc.Metrics.ApachedruidQueryCacheMemcachedTotal), + 
metricApachedruidQueryCacheTotalAverageBytes: newMetricApachedruidQueryCacheTotalAverageBytes(mbc.Metrics.ApachedruidQueryCacheTotalAverageBytes), + metricApachedruidQueryCacheTotalErrors: newMetricApachedruidQueryCacheTotalErrors(mbc.Metrics.ApachedruidQueryCacheTotalErrors), + metricApachedruidQueryCacheTotalEvictions: newMetricApachedruidQueryCacheTotalEvictions(mbc.Metrics.ApachedruidQueryCacheTotalEvictions), + metricApachedruidQueryCacheTotalHitRate: newMetricApachedruidQueryCacheTotalHitRate(mbc.Metrics.ApachedruidQueryCacheTotalHitRate), + metricApachedruidQueryCacheTotalHits: newMetricApachedruidQueryCacheTotalHits(mbc.Metrics.ApachedruidQueryCacheTotalHits), + metricApachedruidQueryCacheTotalMisses: newMetricApachedruidQueryCacheTotalMisses(mbc.Metrics.ApachedruidQueryCacheTotalMisses), + metricApachedruidQueryCacheTotalNumEntries: newMetricApachedruidQueryCacheTotalNumEntries(mbc.Metrics.ApachedruidQueryCacheTotalNumEntries), + metricApachedruidQueryCacheTotalPutError: newMetricApachedruidQueryCacheTotalPutError(mbc.Metrics.ApachedruidQueryCacheTotalPutError), + metricApachedruidQueryCacheTotalPutOk: newMetricApachedruidQueryCacheTotalPutOk(mbc.Metrics.ApachedruidQueryCacheTotalPutOk), + metricApachedruidQueryCacheTotalPutOversized: newMetricApachedruidQueryCacheTotalPutOversized(mbc.Metrics.ApachedruidQueryCacheTotalPutOversized), + metricApachedruidQueryCacheTotalSizeBytes: newMetricApachedruidQueryCacheTotalSizeBytes(mbc.Metrics.ApachedruidQueryCacheTotalSizeBytes), + metricApachedruidQueryCacheTotalTimeouts: newMetricApachedruidQueryCacheTotalTimeouts(mbc.Metrics.ApachedruidQueryCacheTotalTimeouts), + metricApachedruidQueryCount: newMetricApachedruidQueryCount(mbc.Metrics.ApachedruidQueryCount), + metricApachedruidQueryCPUTime: newMetricApachedruidQueryCPUTime(mbc.Metrics.ApachedruidQueryCPUTime), + metricApachedruidQueryFailedCount: newMetricApachedruidQueryFailedCount(mbc.Metrics.ApachedruidQueryFailedCount), + metricApachedruidQueryInterruptedCount: newMetricApachedruidQueryInterruptedCount(mbc.Metrics.ApachedruidQueryInterruptedCount), + metricApachedruidQueryNodeBackpressure: newMetricApachedruidQueryNodeBackpressure(mbc.Metrics.ApachedruidQueryNodeBackpressure), + metricApachedruidQueryNodeBytes: newMetricApachedruidQueryNodeBytes(mbc.Metrics.ApachedruidQueryNodeBytes), + metricApachedruidQueryNodeTime: newMetricApachedruidQueryNodeTime(mbc.Metrics.ApachedruidQueryNodeTime), + metricApachedruidQueryNodeTtfb: newMetricApachedruidQueryNodeTtfb(mbc.Metrics.ApachedruidQueryNodeTtfb), + metricApachedruidQueryPriority: newMetricApachedruidQueryPriority(mbc.Metrics.ApachedruidQueryPriority), + metricApachedruidQueryRowLimitExceededCount: newMetricApachedruidQueryRowLimitExceededCount(mbc.Metrics.ApachedruidQueryRowLimitExceededCount), + metricApachedruidQuerySegmentTime: newMetricApachedruidQuerySegmentTime(mbc.Metrics.ApachedruidQuerySegmentTime), + metricApachedruidQuerySegmentAndCacheTime: newMetricApachedruidQuerySegmentAndCacheTime(mbc.Metrics.ApachedruidQuerySegmentAndCacheTime), + metricApachedruidQuerySegmentsCount: newMetricApachedruidQuerySegmentsCount(mbc.Metrics.ApachedruidQuerySegmentsCount), + metricApachedruidQuerySuccessCount: newMetricApachedruidQuerySuccessCount(mbc.Metrics.ApachedruidQuerySuccessCount), + metricApachedruidQueryTime: newMetricApachedruidQueryTime(mbc.Metrics.ApachedruidQueryTime), + metricApachedruidQueryTimeoutCount: newMetricApachedruidQueryTimeoutCount(mbc.Metrics.ApachedruidQueryTimeoutCount), + metricApachedruidQueryWaitTime: 
newMetricApachedruidQueryWaitTime(mbc.Metrics.ApachedruidQueryWaitTime), + metricApachedruidSegmentAddedBytes: newMetricApachedruidSegmentAddedBytes(mbc.Metrics.ApachedruidSegmentAddedBytes), + metricApachedruidSegmentAssignSkippedCount: newMetricApachedruidSegmentAssignSkippedCount(mbc.Metrics.ApachedruidSegmentAssignSkippedCount), + metricApachedruidSegmentAssignedCount: newMetricApachedruidSegmentAssignedCount(mbc.Metrics.ApachedruidSegmentAssignedCount), + metricApachedruidSegmentCompactedBytes: newMetricApachedruidSegmentCompactedBytes(mbc.Metrics.ApachedruidSegmentCompactedBytes), + metricApachedruidSegmentCompactedCount: newMetricApachedruidSegmentCompactedCount(mbc.Metrics.ApachedruidSegmentCompactedCount), + metricApachedruidSegmentCount: newMetricApachedruidSegmentCount(mbc.Metrics.ApachedruidSegmentCount), + metricApachedruidSegmentDeletedCount: newMetricApachedruidSegmentDeletedCount(mbc.Metrics.ApachedruidSegmentDeletedCount), + metricApachedruidSegmentDropQueueCount: newMetricApachedruidSegmentDropQueueCount(mbc.Metrics.ApachedruidSegmentDropQueueCount), + metricApachedruidSegmentDropSkippedCount: newMetricApachedruidSegmentDropSkippedCount(mbc.Metrics.ApachedruidSegmentDropSkippedCount), + metricApachedruidSegmentDroppedCount: newMetricApachedruidSegmentDroppedCount(mbc.Metrics.ApachedruidSegmentDroppedCount), + metricApachedruidSegmentLoadQueueAssigned: newMetricApachedruidSegmentLoadQueueAssigned(mbc.Metrics.ApachedruidSegmentLoadQueueAssigned), + metricApachedruidSegmentLoadQueueCancelled: newMetricApachedruidSegmentLoadQueueCancelled(mbc.Metrics.ApachedruidSegmentLoadQueueCancelled), + metricApachedruidSegmentLoadQueueCount: newMetricApachedruidSegmentLoadQueueCount(mbc.Metrics.ApachedruidSegmentLoadQueueCount), + metricApachedruidSegmentLoadQueueFailed: newMetricApachedruidSegmentLoadQueueFailed(mbc.Metrics.ApachedruidSegmentLoadQueueFailed), + metricApachedruidSegmentLoadQueueSize: newMetricApachedruidSegmentLoadQueueSize(mbc.Metrics.ApachedruidSegmentLoadQueueSize), + metricApachedruidSegmentLoadQueueSuccess: newMetricApachedruidSegmentLoadQueueSuccess(mbc.Metrics.ApachedruidSegmentLoadQueueSuccess), + metricApachedruidSegmentMax: newMetricApachedruidSegmentMax(mbc.Metrics.ApachedruidSegmentMax), + metricApachedruidSegmentMoveSkippedCount: newMetricApachedruidSegmentMoveSkippedCount(mbc.Metrics.ApachedruidSegmentMoveSkippedCount), + metricApachedruidSegmentMovedBytes: newMetricApachedruidSegmentMovedBytes(mbc.Metrics.ApachedruidSegmentMovedBytes), + metricApachedruidSegmentMovedCount: newMetricApachedruidSegmentMovedCount(mbc.Metrics.ApachedruidSegmentMovedCount), + metricApachedruidSegmentNukedBytes: newMetricApachedruidSegmentNukedBytes(mbc.Metrics.ApachedruidSegmentNukedBytes), + metricApachedruidSegmentOverShadowedCount: newMetricApachedruidSegmentOverShadowedCount(mbc.Metrics.ApachedruidSegmentOverShadowedCount), + metricApachedruidSegmentPendingDelete: newMetricApachedruidSegmentPendingDelete(mbc.Metrics.ApachedruidSegmentPendingDelete), + metricApachedruidSegmentRowCountAvg: newMetricApachedruidSegmentRowCountAvg(mbc.Metrics.ApachedruidSegmentRowCountAvg), + metricApachedruidSegmentRowCountRangeCount: newMetricApachedruidSegmentRowCountRangeCount(mbc.Metrics.ApachedruidSegmentRowCountRangeCount), + metricApachedruidSegmentScanActive: newMetricApachedruidSegmentScanActive(mbc.Metrics.ApachedruidSegmentScanActive), + metricApachedruidSegmentScanPending: newMetricApachedruidSegmentScanPending(mbc.Metrics.ApachedruidSegmentScanPending), + 
metricApachedruidSegmentSize: newMetricApachedruidSegmentSize(mbc.Metrics.ApachedruidSegmentSize), + metricApachedruidSegmentSkipCompactBytes: newMetricApachedruidSegmentSkipCompactBytes(mbc.Metrics.ApachedruidSegmentSkipCompactBytes), + metricApachedruidSegmentSkipCompactCount: newMetricApachedruidSegmentSkipCompactCount(mbc.Metrics.ApachedruidSegmentSkipCompactCount), + metricApachedruidSegmentUnavailableCount: newMetricApachedruidSegmentUnavailableCount(mbc.Metrics.ApachedruidSegmentUnavailableCount), + metricApachedruidSegmentUnderReplicatedCount: newMetricApachedruidSegmentUnderReplicatedCount(mbc.Metrics.ApachedruidSegmentUnderReplicatedCount), + metricApachedruidSegmentUnneededCount: newMetricApachedruidSegmentUnneededCount(mbc.Metrics.ApachedruidSegmentUnneededCount), + metricApachedruidSegmentUsed: newMetricApachedruidSegmentUsed(mbc.Metrics.ApachedruidSegmentUsed), + metricApachedruidSegmentUsedPercent: newMetricApachedruidSegmentUsedPercent(mbc.Metrics.ApachedruidSegmentUsedPercent), + metricApachedruidSegmentWaitCompactBytes: newMetricApachedruidSegmentWaitCompactBytes(mbc.Metrics.ApachedruidSegmentWaitCompactBytes), + metricApachedruidSegmentWaitCompactCount: newMetricApachedruidSegmentWaitCompactCount(mbc.Metrics.ApachedruidSegmentWaitCompactCount), + metricApachedruidServerviewInitTime: newMetricApachedruidServerviewInitTime(mbc.Metrics.ApachedruidServerviewInitTime), + metricApachedruidServerviewSyncHealthy: newMetricApachedruidServerviewSyncHealthy(mbc.Metrics.ApachedruidServerviewSyncHealthy), + metricApachedruidServerviewSyncUnstableTime: newMetricApachedruidServerviewSyncUnstableTime(mbc.Metrics.ApachedruidServerviewSyncUnstableTime), + metricApachedruidSQLQueryBytes: newMetricApachedruidSQLQueryBytes(mbc.Metrics.ApachedruidSQLQueryBytes), + metricApachedruidSQLQueryPlanningTimeMs: newMetricApachedruidSQLQueryPlanningTimeMs(mbc.Metrics.ApachedruidSQLQueryPlanningTimeMs), + metricApachedruidSQLQueryTime: newMetricApachedruidSQLQueryTime(mbc.Metrics.ApachedruidSQLQueryTime), + metricApachedruidSubqueryByteLimitCount: newMetricApachedruidSubqueryByteLimitCount(mbc.Metrics.ApachedruidSubqueryByteLimitCount), + metricApachedruidSubqueryFallbackCount: newMetricApachedruidSubqueryFallbackCount(mbc.Metrics.ApachedruidSubqueryFallbackCount), + metricApachedruidSubqueryFallbackInsufficientTypeCount: newMetricApachedruidSubqueryFallbackInsufficientTypeCount(mbc.Metrics.ApachedruidSubqueryFallbackInsufficientTypeCount), + metricApachedruidSubqueryFallbackUnknownReasonCount: newMetricApachedruidSubqueryFallbackUnknownReasonCount(mbc.Metrics.ApachedruidSubqueryFallbackUnknownReasonCount), + metricApachedruidSubqueryRowLimitCount: newMetricApachedruidSubqueryRowLimitCount(mbc.Metrics.ApachedruidSubqueryRowLimitCount), + metricApachedruidSysCPU: newMetricApachedruidSysCPU(mbc.Metrics.ApachedruidSysCPU), + metricApachedruidSysDiskQueue: newMetricApachedruidSysDiskQueue(mbc.Metrics.ApachedruidSysDiskQueue), + metricApachedruidSysDiskReadCount: newMetricApachedruidSysDiskReadCount(mbc.Metrics.ApachedruidSysDiskReadCount), + metricApachedruidSysDiskReadSize: newMetricApachedruidSysDiskReadSize(mbc.Metrics.ApachedruidSysDiskReadSize), + metricApachedruidSysDiskTransferTime: newMetricApachedruidSysDiskTransferTime(mbc.Metrics.ApachedruidSysDiskTransferTime), + metricApachedruidSysDiskWriteCount: newMetricApachedruidSysDiskWriteCount(mbc.Metrics.ApachedruidSysDiskWriteCount), + metricApachedruidSysDiskWriteSize: newMetricApachedruidSysDiskWriteSize(mbc.Metrics.ApachedruidSysDiskWriteSize), + 
metricApachedruidSysFsFilesCount: newMetricApachedruidSysFsFilesCount(mbc.Metrics.ApachedruidSysFsFilesCount), + metricApachedruidSysFsFilesFree: newMetricApachedruidSysFsFilesFree(mbc.Metrics.ApachedruidSysFsFilesFree), + metricApachedruidSysFsMax: newMetricApachedruidSysFsMax(mbc.Metrics.ApachedruidSysFsMax), + metricApachedruidSysFsUsed: newMetricApachedruidSysFsUsed(mbc.Metrics.ApachedruidSysFsUsed), + metricApachedruidSysLa1: newMetricApachedruidSysLa1(mbc.Metrics.ApachedruidSysLa1), + metricApachedruidSysLa15: newMetricApachedruidSysLa15(mbc.Metrics.ApachedruidSysLa15), + metricApachedruidSysLa5: newMetricApachedruidSysLa5(mbc.Metrics.ApachedruidSysLa5), + metricApachedruidSysMemFree: newMetricApachedruidSysMemFree(mbc.Metrics.ApachedruidSysMemFree), + metricApachedruidSysMemMax: newMetricApachedruidSysMemMax(mbc.Metrics.ApachedruidSysMemMax), + metricApachedruidSysMemUsed: newMetricApachedruidSysMemUsed(mbc.Metrics.ApachedruidSysMemUsed), + metricApachedruidSysNetReadDropped: newMetricApachedruidSysNetReadDropped(mbc.Metrics.ApachedruidSysNetReadDropped), + metricApachedruidSysNetReadErrors: newMetricApachedruidSysNetReadErrors(mbc.Metrics.ApachedruidSysNetReadErrors), + metricApachedruidSysNetReadPackets: newMetricApachedruidSysNetReadPackets(mbc.Metrics.ApachedruidSysNetReadPackets), + metricApachedruidSysNetReadSize: newMetricApachedruidSysNetReadSize(mbc.Metrics.ApachedruidSysNetReadSize), + metricApachedruidSysNetWriteCollisions: newMetricApachedruidSysNetWriteCollisions(mbc.Metrics.ApachedruidSysNetWriteCollisions), + metricApachedruidSysNetWriteErrors: newMetricApachedruidSysNetWriteErrors(mbc.Metrics.ApachedruidSysNetWriteErrors), + metricApachedruidSysNetWritePackets: newMetricApachedruidSysNetWritePackets(mbc.Metrics.ApachedruidSysNetWritePackets), + metricApachedruidSysNetWriteSize: newMetricApachedruidSysNetWriteSize(mbc.Metrics.ApachedruidSysNetWriteSize), + metricApachedruidSysStorageUsed: newMetricApachedruidSysStorageUsed(mbc.Metrics.ApachedruidSysStorageUsed), + metricApachedruidSysSwapFree: newMetricApachedruidSysSwapFree(mbc.Metrics.ApachedruidSysSwapFree), + metricApachedruidSysSwapMax: newMetricApachedruidSysSwapMax(mbc.Metrics.ApachedruidSysSwapMax), + metricApachedruidSysSwapPageIn: newMetricApachedruidSysSwapPageIn(mbc.Metrics.ApachedruidSysSwapPageIn), + metricApachedruidSysSwapPageOut: newMetricApachedruidSysSwapPageOut(mbc.Metrics.ApachedruidSysSwapPageOut), + metricApachedruidSysTcpv4ActiveOpens: newMetricApachedruidSysTcpv4ActiveOpens(mbc.Metrics.ApachedruidSysTcpv4ActiveOpens), + metricApachedruidSysTcpv4AttemptFails: newMetricApachedruidSysTcpv4AttemptFails(mbc.Metrics.ApachedruidSysTcpv4AttemptFails), + metricApachedruidSysTcpv4EstabResets: newMetricApachedruidSysTcpv4EstabResets(mbc.Metrics.ApachedruidSysTcpv4EstabResets), + metricApachedruidSysTcpv4InErrs: newMetricApachedruidSysTcpv4InErrs(mbc.Metrics.ApachedruidSysTcpv4InErrs), + metricApachedruidSysTcpv4InSegs: newMetricApachedruidSysTcpv4InSegs(mbc.Metrics.ApachedruidSysTcpv4InSegs), + metricApachedruidSysTcpv4OutRsts: newMetricApachedruidSysTcpv4OutRsts(mbc.Metrics.ApachedruidSysTcpv4OutRsts), + metricApachedruidSysTcpv4OutSegs: newMetricApachedruidSysTcpv4OutSegs(mbc.Metrics.ApachedruidSysTcpv4OutSegs), + metricApachedruidSysTcpv4PassiveOpens: newMetricApachedruidSysTcpv4PassiveOpens(mbc.Metrics.ApachedruidSysTcpv4PassiveOpens), + metricApachedruidSysTcpv4RetransSegs: newMetricApachedruidSysTcpv4RetransSegs(mbc.Metrics.ApachedruidSysTcpv4RetransSegs), + metricApachedruidSysUptime: 
newMetricApachedruidSysUptime(mbc.Metrics.ApachedruidSysUptime), + metricApachedruidTaskActionBatchAttempts: newMetricApachedruidTaskActionBatchAttempts(mbc.Metrics.ApachedruidTaskActionBatchAttempts), + metricApachedruidTaskActionBatchQueueTime: newMetricApachedruidTaskActionBatchQueueTime(mbc.Metrics.ApachedruidTaskActionBatchQueueTime), + metricApachedruidTaskActionBatchRunTime: newMetricApachedruidTaskActionBatchRunTime(mbc.Metrics.ApachedruidTaskActionBatchRunTime), + metricApachedruidTaskActionBatchSize: newMetricApachedruidTaskActionBatchSize(mbc.Metrics.ApachedruidTaskActionBatchSize), + metricApachedruidTaskActionFailedCount: newMetricApachedruidTaskActionFailedCount(mbc.Metrics.ApachedruidTaskActionFailedCount), + metricApachedruidTaskActionLogTime: newMetricApachedruidTaskActionLogTime(mbc.Metrics.ApachedruidTaskActionLogTime), + metricApachedruidTaskActionRunTime: newMetricApachedruidTaskActionRunTime(mbc.Metrics.ApachedruidTaskActionRunTime), + metricApachedruidTaskActionSuccessCount: newMetricApachedruidTaskActionSuccessCount(mbc.Metrics.ApachedruidTaskActionSuccessCount), + metricApachedruidTaskFailedCount: newMetricApachedruidTaskFailedCount(mbc.Metrics.ApachedruidTaskFailedCount), + metricApachedruidTaskPendingCount: newMetricApachedruidTaskPendingCount(mbc.Metrics.ApachedruidTaskPendingCount), + metricApachedruidTaskPendingTime: newMetricApachedruidTaskPendingTime(mbc.Metrics.ApachedruidTaskPendingTime), + metricApachedruidTaskRunTime: newMetricApachedruidTaskRunTime(mbc.Metrics.ApachedruidTaskRunTime), + metricApachedruidTaskRunningCount: newMetricApachedruidTaskRunningCount(mbc.Metrics.ApachedruidTaskRunningCount), + metricApachedruidTaskSegmentAvailabilityWaitTime: newMetricApachedruidTaskSegmentAvailabilityWaitTime(mbc.Metrics.ApachedruidTaskSegmentAvailabilityWaitTime), + metricApachedruidTaskSuccessCount: newMetricApachedruidTaskSuccessCount(mbc.Metrics.ApachedruidTaskSuccessCount), + metricApachedruidTaskWaitingCount: newMetricApachedruidTaskWaitingCount(mbc.Metrics.ApachedruidTaskWaitingCount), + metricApachedruidTaskSlotBlacklistedCount: newMetricApachedruidTaskSlotBlacklistedCount(mbc.Metrics.ApachedruidTaskSlotBlacklistedCount), + metricApachedruidTaskSlotIdleCount: newMetricApachedruidTaskSlotIdleCount(mbc.Metrics.ApachedruidTaskSlotIdleCount), + metricApachedruidTaskSlotLazyCount: newMetricApachedruidTaskSlotLazyCount(mbc.Metrics.ApachedruidTaskSlotLazyCount), + metricApachedruidTaskSlotTotalCount: newMetricApachedruidTaskSlotTotalCount(mbc.Metrics.ApachedruidTaskSlotTotalCount), + metricApachedruidTaskSlotUsedCount: newMetricApachedruidTaskSlotUsedCount(mbc.Metrics.ApachedruidTaskSlotUsedCount), + metricApachedruidTierHistoricalCount: newMetricApachedruidTierHistoricalCount(mbc.Metrics.ApachedruidTierHistoricalCount), + metricApachedruidTierReplicationFactor: newMetricApachedruidTierReplicationFactor(mbc.Metrics.ApachedruidTierReplicationFactor), + metricApachedruidTierRequiredCapacity: newMetricApachedruidTierRequiredCapacity(mbc.Metrics.ApachedruidTierRequiredCapacity), + metricApachedruidTierTotalCapacity: newMetricApachedruidTierTotalCapacity(mbc.Metrics.ApachedruidTierTotalCapacity), + metricApachedruidWorkerTaskFailedCount: newMetricApachedruidWorkerTaskFailedCount(mbc.Metrics.ApachedruidWorkerTaskFailedCount), + metricApachedruidWorkerTaskSuccessCount: newMetricApachedruidWorkerTaskSuccessCount(mbc.Metrics.ApachedruidWorkerTaskSuccessCount), + metricApachedruidWorkerTaskSlotIdleCount: 
newMetricApachedruidWorkerTaskSlotIdleCount(mbc.Metrics.ApachedruidWorkerTaskSlotIdleCount),
+		metricApachedruidWorkerTaskSlotTotalCount: newMetricApachedruidWorkerTaskSlotTotalCount(mbc.Metrics.ApachedruidWorkerTaskSlotTotalCount),
+		metricApachedruidWorkerTaskSlotUsedCount:  newMetricApachedruidWorkerTaskSlotUsedCount(mbc.Metrics.ApachedruidWorkerTaskSlotUsedCount),
+		metricApachedruidZkConnected:              newMetricApachedruidZkConnected(mbc.Metrics.ApachedruidZkConnected),
+		metricApachedruidZkReconnectTime:          newMetricApachedruidZkReconnectTime(mbc.Metrics.ApachedruidZkReconnectTime),
+	}
+	for _, op := range options {
+		op(mb)
+	}
+	return mb
+}
+
+// NewResourceBuilder returns a new resource builder that should be used to build a resource associated with for the emitted metrics.
+func (mb *MetricsBuilder) NewResourceBuilder() *ResourceBuilder {
+	return NewResourceBuilder(mb.config.ResourceAttributes)
+}
+
+// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity.
+func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) {
+	if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() {
+		mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len()
+	}
+}
+
+// ResourceMetricsOption applies changes to provided resource metrics.
+type ResourceMetricsOption func(pmetric.ResourceMetrics)
+
+// WithResource sets the provided resource on the emitted ResourceMetrics.
+// It's recommended to use ResourceBuilder to create the resource.
+func WithResource(res pcommon.Resource) ResourceMetricsOption {
+	return func(rm pmetric.ResourceMetrics) {
+		res.CopyTo(rm.Resource())
+	}
+}
+
+// WithStartTimeOverride overrides start time for all the resource metrics data points.
+// This option should be only used if different start time has to be set on metrics coming from different resources.
+func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption {
+	return func(rm pmetric.ResourceMetrics) {
+		var dps pmetric.NumberDataPointSlice
+		metrics := rm.ScopeMetrics().At(0).Metrics()
+		for i := 0; i < metrics.Len(); i++ {
+			switch metrics.At(i).Type() {
+			case pmetric.MetricTypeGauge:
+				dps = metrics.At(i).Gauge().DataPoints()
+			case pmetric.MetricTypeSum:
+				dps = metrics.At(i).Sum().DataPoints()
+			}
+			for j := 0; j < dps.Len(); j++ {
+				dps.At(j).SetStartTimestamp(start)
+			}
+		}
+	}
+}
+
+// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for
+// recording another set of data points as part of another resource. This function can be helpful when one scraper
+// needs to emit metrics from several resources. Otherwise calling this function is not required,
+// just `Emit` function can be called instead.
+// Resource attributes should be provided as ResourceMetricsOption arguments.
+func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { + rm := pmetric.NewResourceMetrics() + ils := rm.ScopeMetrics().AppendEmpty() + ils.Scope().SetName("github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachedruidreceiver") + ils.Scope().SetVersion(mb.buildInfo.Version) + ils.Metrics().EnsureCapacity(mb.metricsCapacity) + mb.metricApachedruidCompactSegmentAnalyzerFetchAndProcessMillis.emit(ils.Metrics()) + mb.metricApachedruidCompactTaskCount.emit(ils.Metrics()) + mb.metricApachedruidCompactTaskAvailableSlotCount.emit(ils.Metrics()) + mb.metricApachedruidCompactTaskMaxSlotCount.emit(ils.Metrics()) + mb.metricApachedruidCoordinatorGlobalTime.emit(ils.Metrics()) + mb.metricApachedruidCoordinatorTime.emit(ils.Metrics()) + mb.metricApachedruidIngestBytesReceived.emit(ils.Metrics()) + mb.metricApachedruidIngestCount.emit(ils.Metrics()) + mb.metricApachedruidIngestEventsBuffered.emit(ils.Metrics()) + mb.metricApachedruidIngestEventsDuplicate.emit(ils.Metrics()) + mb.metricApachedruidIngestEventsMessageGap.emit(ils.Metrics()) + mb.metricApachedruidIngestEventsProcessed.emit(ils.Metrics()) + mb.metricApachedruidIngestEventsProcessedWithError.emit(ils.Metrics()) + mb.metricApachedruidIngestEventsThrownAway.emit(ils.Metrics()) + mb.metricApachedruidIngestEventsUnparseable.emit(ils.Metrics()) + mb.metricApachedruidIngestHandoffCount.emit(ils.Metrics()) + mb.metricApachedruidIngestHandoffFailed.emit(ils.Metrics()) + mb.metricApachedruidIngestHandoffTime.emit(ils.Metrics()) + mb.metricApachedruidIngestInputBytes.emit(ils.Metrics()) + mb.metricApachedruidIngestKafkaAvgLag.emit(ils.Metrics()) + mb.metricApachedruidIngestKafkaLag.emit(ils.Metrics()) + mb.metricApachedruidIngestKafkaMaxLag.emit(ils.Metrics()) + mb.metricApachedruidIngestKafkaPartitionLag.emit(ils.Metrics()) + mb.metricApachedruidIngestKinesisAvgLagTime.emit(ils.Metrics()) + mb.metricApachedruidIngestKinesisLagTime.emit(ils.Metrics()) + mb.metricApachedruidIngestKinesisMaxLagTime.emit(ils.Metrics()) + mb.metricApachedruidIngestKinesisPartitionLagTime.emit(ils.Metrics()) + mb.metricApachedruidIngestMergeCPU.emit(ils.Metrics()) + mb.metricApachedruidIngestMergeTime.emit(ils.Metrics()) + mb.metricApachedruidIngestNoticesQueueSize.emit(ils.Metrics()) + mb.metricApachedruidIngestNoticesTime.emit(ils.Metrics()) + mb.metricApachedruidIngestPauseTime.emit(ils.Metrics()) + mb.metricApachedruidIngestPersistsBackPressure.emit(ils.Metrics()) + mb.metricApachedruidIngestPersistsCount.emit(ils.Metrics()) + mb.metricApachedruidIngestPersistsCPU.emit(ils.Metrics()) + mb.metricApachedruidIngestPersistsFailed.emit(ils.Metrics()) + mb.metricApachedruidIngestPersistsTime.emit(ils.Metrics()) + mb.metricApachedruidIngestRowsOutput.emit(ils.Metrics()) + mb.metricApachedruidIngestSegmentsCount.emit(ils.Metrics()) + mb.metricApachedruidIngestShuffleBytes.emit(ils.Metrics()) + mb.metricApachedruidIngestShuffleRequests.emit(ils.Metrics()) + mb.metricApachedruidIngestSinkCount.emit(ils.Metrics()) + mb.metricApachedruidIngestTombstonesCount.emit(ils.Metrics()) + mb.metricApachedruidIntervalCompactedCount.emit(ils.Metrics()) + mb.metricApachedruidIntervalSkipCompactCount.emit(ils.Metrics()) + mb.metricApachedruidIntervalWaitCompactCount.emit(ils.Metrics()) + mb.metricApachedruidJettyNumOpenConnections.emit(ils.Metrics()) + mb.metricApachedruidJettyThreadPoolBusy.emit(ils.Metrics()) + mb.metricApachedruidJettyThreadPoolIdle.emit(ils.Metrics()) + mb.metricApachedruidJettyThreadPoolIsLowOnThreads.emit(ils.Metrics()) + 
mb.metricApachedruidJettyThreadPoolMax.emit(ils.Metrics()) + mb.metricApachedruidJettyThreadPoolMin.emit(ils.Metrics()) + mb.metricApachedruidJettyThreadPoolQueueSize.emit(ils.Metrics()) + mb.metricApachedruidJettyThreadPoolTotal.emit(ils.Metrics()) + mb.metricApachedruidJvmBufferpoolCapacity.emit(ils.Metrics()) + mb.metricApachedruidJvmBufferpoolCount.emit(ils.Metrics()) + mb.metricApachedruidJvmBufferpoolUsed.emit(ils.Metrics()) + mb.metricApachedruidJvmGcCount.emit(ils.Metrics()) + mb.metricApachedruidJvmGcCPU.emit(ils.Metrics()) + mb.metricApachedruidJvmMemCommitted.emit(ils.Metrics()) + mb.metricApachedruidJvmMemInit.emit(ils.Metrics()) + mb.metricApachedruidJvmMemMax.emit(ils.Metrics()) + mb.metricApachedruidJvmMemUsed.emit(ils.Metrics()) + mb.metricApachedruidJvmPoolCommitted.emit(ils.Metrics()) + mb.metricApachedruidJvmPoolInit.emit(ils.Metrics()) + mb.metricApachedruidJvmPoolMax.emit(ils.Metrics()) + mb.metricApachedruidJvmPoolUsed.emit(ils.Metrics()) + mb.metricApachedruidKillPendingSegmentsCount.emit(ils.Metrics()) + mb.metricApachedruidKillTaskCount.emit(ils.Metrics()) + mb.metricApachedruidKillTaskAvailableSlotCount.emit(ils.Metrics()) + mb.metricApachedruidKillTaskMaxSlotCount.emit(ils.Metrics()) + mb.metricApachedruidMergeBufferPendingRequests.emit(ils.Metrics()) + mb.metricApachedruidMetadataKillAuditCount.emit(ils.Metrics()) + mb.metricApachedruidMetadataKillCompactionCount.emit(ils.Metrics()) + mb.metricApachedruidMetadataKillDatasourceCount.emit(ils.Metrics()) + mb.metricApachedruidMetadataKillRuleCount.emit(ils.Metrics()) + mb.metricApachedruidMetadataKillSupervisorCount.emit(ils.Metrics()) + mb.metricApachedruidMetadatacacheInitTime.emit(ils.Metrics()) + mb.metricApachedruidMetadatacacheRefreshCount.emit(ils.Metrics()) + mb.metricApachedruidMetadatacacheRefreshTime.emit(ils.Metrics()) + mb.metricApachedruidQueryByteLimitExceededCount.emit(ils.Metrics()) + mb.metricApachedruidQueryBytes.emit(ils.Metrics()) + mb.metricApachedruidQueryCacheDeltaAverageBytes.emit(ils.Metrics()) + mb.metricApachedruidQueryCacheDeltaErrors.emit(ils.Metrics()) + mb.metricApachedruidQueryCacheDeltaEvictions.emit(ils.Metrics()) + mb.metricApachedruidQueryCacheDeltaHitRate.emit(ils.Metrics()) + mb.metricApachedruidQueryCacheDeltaHits.emit(ils.Metrics()) + mb.metricApachedruidQueryCacheDeltaMisses.emit(ils.Metrics()) + mb.metricApachedruidQueryCacheDeltaNumEntries.emit(ils.Metrics()) + mb.metricApachedruidQueryCacheDeltaPutError.emit(ils.Metrics()) + mb.metricApachedruidQueryCacheDeltaPutOk.emit(ils.Metrics()) + mb.metricApachedruidQueryCacheDeltaPutOversized.emit(ils.Metrics()) + mb.metricApachedruidQueryCacheDeltaSizeBytes.emit(ils.Metrics()) + mb.metricApachedruidQueryCacheDeltaTimeouts.emit(ils.Metrics()) + mb.metricApachedruidQueryCacheMemcachedDelta.emit(ils.Metrics()) + mb.metricApachedruidQueryCacheMemcachedTotal.emit(ils.Metrics()) + mb.metricApachedruidQueryCacheTotalAverageBytes.emit(ils.Metrics()) + mb.metricApachedruidQueryCacheTotalErrors.emit(ils.Metrics()) + mb.metricApachedruidQueryCacheTotalEvictions.emit(ils.Metrics()) + mb.metricApachedruidQueryCacheTotalHitRate.emit(ils.Metrics()) + mb.metricApachedruidQueryCacheTotalHits.emit(ils.Metrics()) + mb.metricApachedruidQueryCacheTotalMisses.emit(ils.Metrics()) + mb.metricApachedruidQueryCacheTotalNumEntries.emit(ils.Metrics()) + mb.metricApachedruidQueryCacheTotalPutError.emit(ils.Metrics()) + mb.metricApachedruidQueryCacheTotalPutOk.emit(ils.Metrics()) + mb.metricApachedruidQueryCacheTotalPutOversized.emit(ils.Metrics()) + 
mb.metricApachedruidQueryCacheTotalSizeBytes.emit(ils.Metrics()) + mb.metricApachedruidQueryCacheTotalTimeouts.emit(ils.Metrics()) + mb.metricApachedruidQueryCount.emit(ils.Metrics()) + mb.metricApachedruidQueryCPUTime.emit(ils.Metrics()) + mb.metricApachedruidQueryFailedCount.emit(ils.Metrics()) + mb.metricApachedruidQueryInterruptedCount.emit(ils.Metrics()) + mb.metricApachedruidQueryNodeBackpressure.emit(ils.Metrics()) + mb.metricApachedruidQueryNodeBytes.emit(ils.Metrics()) + mb.metricApachedruidQueryNodeTime.emit(ils.Metrics()) + mb.metricApachedruidQueryNodeTtfb.emit(ils.Metrics()) + mb.metricApachedruidQueryPriority.emit(ils.Metrics()) + mb.metricApachedruidQueryRowLimitExceededCount.emit(ils.Metrics()) + mb.metricApachedruidQuerySegmentTime.emit(ils.Metrics()) + mb.metricApachedruidQuerySegmentAndCacheTime.emit(ils.Metrics()) + mb.metricApachedruidQuerySegmentsCount.emit(ils.Metrics()) + mb.metricApachedruidQuerySuccessCount.emit(ils.Metrics()) + mb.metricApachedruidQueryTime.emit(ils.Metrics()) + mb.metricApachedruidQueryTimeoutCount.emit(ils.Metrics()) + mb.metricApachedruidQueryWaitTime.emit(ils.Metrics()) + mb.metricApachedruidSegmentAddedBytes.emit(ils.Metrics()) + mb.metricApachedruidSegmentAssignSkippedCount.emit(ils.Metrics()) + mb.metricApachedruidSegmentAssignedCount.emit(ils.Metrics()) + mb.metricApachedruidSegmentCompactedBytes.emit(ils.Metrics()) + mb.metricApachedruidSegmentCompactedCount.emit(ils.Metrics()) + mb.metricApachedruidSegmentCount.emit(ils.Metrics()) + mb.metricApachedruidSegmentDeletedCount.emit(ils.Metrics()) + mb.metricApachedruidSegmentDropQueueCount.emit(ils.Metrics()) + mb.metricApachedruidSegmentDropSkippedCount.emit(ils.Metrics()) + mb.metricApachedruidSegmentDroppedCount.emit(ils.Metrics()) + mb.metricApachedruidSegmentLoadQueueAssigned.emit(ils.Metrics()) + mb.metricApachedruidSegmentLoadQueueCancelled.emit(ils.Metrics()) + mb.metricApachedruidSegmentLoadQueueCount.emit(ils.Metrics()) + mb.metricApachedruidSegmentLoadQueueFailed.emit(ils.Metrics()) + mb.metricApachedruidSegmentLoadQueueSize.emit(ils.Metrics()) + mb.metricApachedruidSegmentLoadQueueSuccess.emit(ils.Metrics()) + mb.metricApachedruidSegmentMax.emit(ils.Metrics()) + mb.metricApachedruidSegmentMoveSkippedCount.emit(ils.Metrics()) + mb.metricApachedruidSegmentMovedBytes.emit(ils.Metrics()) + mb.metricApachedruidSegmentMovedCount.emit(ils.Metrics()) + mb.metricApachedruidSegmentNukedBytes.emit(ils.Metrics()) + mb.metricApachedruidSegmentOverShadowedCount.emit(ils.Metrics()) + mb.metricApachedruidSegmentPendingDelete.emit(ils.Metrics()) + mb.metricApachedruidSegmentRowCountAvg.emit(ils.Metrics()) + mb.metricApachedruidSegmentRowCountRangeCount.emit(ils.Metrics()) + mb.metricApachedruidSegmentScanActive.emit(ils.Metrics()) + mb.metricApachedruidSegmentScanPending.emit(ils.Metrics()) + mb.metricApachedruidSegmentSize.emit(ils.Metrics()) + mb.metricApachedruidSegmentSkipCompactBytes.emit(ils.Metrics()) + mb.metricApachedruidSegmentSkipCompactCount.emit(ils.Metrics()) + mb.metricApachedruidSegmentUnavailableCount.emit(ils.Metrics()) + mb.metricApachedruidSegmentUnderReplicatedCount.emit(ils.Metrics()) + mb.metricApachedruidSegmentUnneededCount.emit(ils.Metrics()) + mb.metricApachedruidSegmentUsed.emit(ils.Metrics()) + mb.metricApachedruidSegmentUsedPercent.emit(ils.Metrics()) + mb.metricApachedruidSegmentWaitCompactBytes.emit(ils.Metrics()) + mb.metricApachedruidSegmentWaitCompactCount.emit(ils.Metrics()) + mb.metricApachedruidServerviewInitTime.emit(ils.Metrics()) + 
mb.metricApachedruidServerviewSyncHealthy.emit(ils.Metrics()) + mb.metricApachedruidServerviewSyncUnstableTime.emit(ils.Metrics()) + mb.metricApachedruidSQLQueryBytes.emit(ils.Metrics()) + mb.metricApachedruidSQLQueryPlanningTimeMs.emit(ils.Metrics()) + mb.metricApachedruidSQLQueryTime.emit(ils.Metrics()) + mb.metricApachedruidSubqueryByteLimitCount.emit(ils.Metrics()) + mb.metricApachedruidSubqueryFallbackCount.emit(ils.Metrics()) + mb.metricApachedruidSubqueryFallbackInsufficientTypeCount.emit(ils.Metrics()) + mb.metricApachedruidSubqueryFallbackUnknownReasonCount.emit(ils.Metrics()) + mb.metricApachedruidSubqueryRowLimitCount.emit(ils.Metrics()) + mb.metricApachedruidSysCPU.emit(ils.Metrics()) + mb.metricApachedruidSysDiskQueue.emit(ils.Metrics()) + mb.metricApachedruidSysDiskReadCount.emit(ils.Metrics()) + mb.metricApachedruidSysDiskReadSize.emit(ils.Metrics()) + mb.metricApachedruidSysDiskTransferTime.emit(ils.Metrics()) + mb.metricApachedruidSysDiskWriteCount.emit(ils.Metrics()) + mb.metricApachedruidSysDiskWriteSize.emit(ils.Metrics()) + mb.metricApachedruidSysFsFilesCount.emit(ils.Metrics()) + mb.metricApachedruidSysFsFilesFree.emit(ils.Metrics()) + mb.metricApachedruidSysFsMax.emit(ils.Metrics()) + mb.metricApachedruidSysFsUsed.emit(ils.Metrics()) + mb.metricApachedruidSysLa1.emit(ils.Metrics()) + mb.metricApachedruidSysLa15.emit(ils.Metrics()) + mb.metricApachedruidSysLa5.emit(ils.Metrics()) + mb.metricApachedruidSysMemFree.emit(ils.Metrics()) + mb.metricApachedruidSysMemMax.emit(ils.Metrics()) + mb.metricApachedruidSysMemUsed.emit(ils.Metrics()) + mb.metricApachedruidSysNetReadDropped.emit(ils.Metrics()) + mb.metricApachedruidSysNetReadErrors.emit(ils.Metrics()) + mb.metricApachedruidSysNetReadPackets.emit(ils.Metrics()) + mb.metricApachedruidSysNetReadSize.emit(ils.Metrics()) + mb.metricApachedruidSysNetWriteCollisions.emit(ils.Metrics()) + mb.metricApachedruidSysNetWriteErrors.emit(ils.Metrics()) + mb.metricApachedruidSysNetWritePackets.emit(ils.Metrics()) + mb.metricApachedruidSysNetWriteSize.emit(ils.Metrics()) + mb.metricApachedruidSysStorageUsed.emit(ils.Metrics()) + mb.metricApachedruidSysSwapFree.emit(ils.Metrics()) + mb.metricApachedruidSysSwapMax.emit(ils.Metrics()) + mb.metricApachedruidSysSwapPageIn.emit(ils.Metrics()) + mb.metricApachedruidSysSwapPageOut.emit(ils.Metrics()) + mb.metricApachedruidSysTcpv4ActiveOpens.emit(ils.Metrics()) + mb.metricApachedruidSysTcpv4AttemptFails.emit(ils.Metrics()) + mb.metricApachedruidSysTcpv4EstabResets.emit(ils.Metrics()) + mb.metricApachedruidSysTcpv4InErrs.emit(ils.Metrics()) + mb.metricApachedruidSysTcpv4InSegs.emit(ils.Metrics()) + mb.metricApachedruidSysTcpv4OutRsts.emit(ils.Metrics()) + mb.metricApachedruidSysTcpv4OutSegs.emit(ils.Metrics()) + mb.metricApachedruidSysTcpv4PassiveOpens.emit(ils.Metrics()) + mb.metricApachedruidSysTcpv4RetransSegs.emit(ils.Metrics()) + mb.metricApachedruidSysUptime.emit(ils.Metrics()) + mb.metricApachedruidTaskActionBatchAttempts.emit(ils.Metrics()) + mb.metricApachedruidTaskActionBatchQueueTime.emit(ils.Metrics()) + mb.metricApachedruidTaskActionBatchRunTime.emit(ils.Metrics()) + mb.metricApachedruidTaskActionBatchSize.emit(ils.Metrics()) + mb.metricApachedruidTaskActionFailedCount.emit(ils.Metrics()) + mb.metricApachedruidTaskActionLogTime.emit(ils.Metrics()) + mb.metricApachedruidTaskActionRunTime.emit(ils.Metrics()) + mb.metricApachedruidTaskActionSuccessCount.emit(ils.Metrics()) + mb.metricApachedruidTaskFailedCount.emit(ils.Metrics()) + 
mb.metricApachedruidTaskPendingCount.emit(ils.Metrics())
+	mb.metricApachedruidTaskPendingTime.emit(ils.Metrics())
+	mb.metricApachedruidTaskRunTime.emit(ils.Metrics())
+	mb.metricApachedruidTaskRunningCount.emit(ils.Metrics())
+	mb.metricApachedruidTaskSegmentAvailabilityWaitTime.emit(ils.Metrics())
+	mb.metricApachedruidTaskSuccessCount.emit(ils.Metrics())
+	mb.metricApachedruidTaskWaitingCount.emit(ils.Metrics())
+	mb.metricApachedruidTaskSlotBlacklistedCount.emit(ils.Metrics())
+	mb.metricApachedruidTaskSlotIdleCount.emit(ils.Metrics())
+	mb.metricApachedruidTaskSlotLazyCount.emit(ils.Metrics())
+	mb.metricApachedruidTaskSlotTotalCount.emit(ils.Metrics())
+	mb.metricApachedruidTaskSlotUsedCount.emit(ils.Metrics())
+	mb.metricApachedruidTierHistoricalCount.emit(ils.Metrics())
+	mb.metricApachedruidTierReplicationFactor.emit(ils.Metrics())
+	mb.metricApachedruidTierRequiredCapacity.emit(ils.Metrics())
+	mb.metricApachedruidTierTotalCapacity.emit(ils.Metrics())
+	mb.metricApachedruidWorkerTaskFailedCount.emit(ils.Metrics())
+	mb.metricApachedruidWorkerTaskSuccessCount.emit(ils.Metrics())
+	mb.metricApachedruidWorkerTaskSlotIdleCount.emit(ils.Metrics())
+	mb.metricApachedruidWorkerTaskSlotTotalCount.emit(ils.Metrics())
+	mb.metricApachedruidWorkerTaskSlotUsedCount.emit(ils.Metrics())
+	mb.metricApachedruidZkConnected.emit(ils.Metrics())
+	mb.metricApachedruidZkReconnectTime.emit(ils.Metrics())
+
+	for _, op := range rmo {
+		op(rm)
+	}
+	if ils.Metrics().Len() > 0 {
+		mb.updateCapacity(rm)
+		rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty())
+	}
+}
+
+// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for
+// recording another set of metrics. This function will be responsible for applying all the transformations required to
+// produce metric representation defined in metadata and user config, e.g. delta or cumulative.
+func (mb *MetricsBuilder) Emit(rmo ...ResourceMetricsOption) pmetric.Metrics {
+	mb.EmitForResource(rmo...)
+	metrics := mb.metricsBuffer
+	mb.metricsBuffer = pmetric.NewMetrics()
+	return metrics
+}
+
+// RecordApachedruidCompactSegmentAnalyzerFetchAndProcessMillisDataPoint adds a data point to apachedruid.compact.segment_analyzer.fetch_and_process_millis metric.
+func (mb *MetricsBuilder) RecordApachedruidCompactSegmentAnalyzerFetchAndProcessMillisDataPoint(ts pcommon.Timestamp, val int64, compactTaskTypeAttributeValue string, compactDataSourceAttributeValue string, compactGroupIDAttributeValue string, compactTagsAttributeValue string, compactTaskIDAttributeValue string) {
+	mb.metricApachedruidCompactSegmentAnalyzerFetchAndProcessMillis.recordDataPoint(mb.startTime, ts, val, compactTaskTypeAttributeValue, compactDataSourceAttributeValue, compactGroupIDAttributeValue, compactTagsAttributeValue, compactTaskIDAttributeValue)
+}
+
+// RecordApachedruidCompactTaskCountDataPoint adds a data point to apachedruid.compact.task.count metric.
+func (mb *MetricsBuilder) RecordApachedruidCompactTaskCountDataPoint(ts pcommon.Timestamp, val int64) {
+	mb.metricApachedruidCompactTaskCount.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordApachedruidCompactTaskAvailableSlotCountDataPoint adds a data point to apachedruid.compact_task.available_slot.count metric.
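// NOTE (editorial illustration, not part of the generated file or of this patch):
// the sketch below shows one way a scraper could drive the MetricsBuilder defined
// above. It assumes the usual mdatagen companions in this package
// (DefaultMetricsBuilderConfig from generated_config.go, ResourceBuilder.Emit from
// generated_resource.go) and uses placeholder values in place of numbers parsed
// from Druid's emitted metrics.

package apachedruidreceiver // hypothetical consumer of internal/metadata

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
	"go.opentelemetry.io/collector/receiver"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachedruidreceiver/internal/metadata"
)

// scrapeOnce records a couple of data points and bundles them into a single
// pmetric.Metrics payload, attaching the resource built by ResourceBuilder.
func scrapeOnce(settings receiver.CreateSettings) pmetric.Metrics {
	mb := metadata.NewMetricsBuilder(metadata.DefaultMetricsBuilderConfig(), settings)
	now := pcommon.NewTimestampFromTime(time.Now())

	// The values are placeholders for numbers taken from Druid metric events.
	mb.RecordApachedruidCompactTaskCountDataPoint(now, 3)
	mb.RecordApachedruidCoordinatorTimeDataPoint(now, 125, "BalanceSegments")

	rb := mb.NewResourceBuilder()
	// Emit drains everything recorded so far; options such as WithResource and
	// WithStartTimeOverride apply to the resulting ResourceMetrics.
	return mb.Emit(metadata.WithResource(rb.Emit()))
}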
+func (mb *MetricsBuilder) RecordApachedruidCompactTaskAvailableSlotCountDataPoint(ts pcommon.Timestamp, val int64) {
+	mb.metricApachedruidCompactTaskAvailableSlotCount.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordApachedruidCompactTaskMaxSlotCountDataPoint adds a data point to apachedruid.compact_task.max_slot.count metric.
+func (mb *MetricsBuilder) RecordApachedruidCompactTaskMaxSlotCountDataPoint(ts pcommon.Timestamp, val int64) {
+	mb.metricApachedruidCompactTaskMaxSlotCount.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordApachedruidCoordinatorGlobalTimeDataPoint adds a data point to apachedruid.coordinator.global.time metric.
+func (mb *MetricsBuilder) RecordApachedruidCoordinatorGlobalTimeDataPoint(ts pcommon.Timestamp, val int64, coordinatorDutyGroupAttributeValue string) {
+	mb.metricApachedruidCoordinatorGlobalTime.recordDataPoint(mb.startTime, ts, val, coordinatorDutyGroupAttributeValue)
+}
+
+// RecordApachedruidCoordinatorTimeDataPoint adds a data point to apachedruid.coordinator.time metric.
+func (mb *MetricsBuilder) RecordApachedruidCoordinatorTimeDataPoint(ts pcommon.Timestamp, val int64, coordinatorDutyAttributeValue string) {
+	mb.metricApachedruidCoordinatorTime.recordDataPoint(mb.startTime, ts, val, coordinatorDutyAttributeValue)
+}
+
+// RecordApachedruidIngestBytesReceivedDataPoint adds a data point to apachedruid.ingest.bytes.received metric.
+func (mb *MetricsBuilder) RecordApachedruidIngestBytesReceivedDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestTaskIDAttributeValue string, ingestDataSourceAttributeValue string, ingestServiceNameAttributeValue string) {
+	mb.metricApachedruidIngestBytesReceived.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestTaskIDAttributeValue, ingestDataSourceAttributeValue, ingestServiceNameAttributeValue)
+}
+
+// RecordApachedruidIngestCountDataPoint adds a data point to apachedruid.ingest.count metric.
+func (mb *MetricsBuilder) RecordApachedruidIngestCountDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string, ingestTaskIngestionModeAttributeValue string) {
+	mb.metricApachedruidIngestCount.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue, ingestTaskIngestionModeAttributeValue)
+}
+
+// RecordApachedruidIngestEventsBufferedDataPoint adds a data point to apachedruid.ingest.events.buffered metric.
+func (mb *MetricsBuilder) RecordApachedruidIngestEventsBufferedDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestServiceNameAttributeValue string, ingestBufferCapacityAttributeValue string, ingestTaskIDAttributeValue string) {
+	mb.metricApachedruidIngestEventsBuffered.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestServiceNameAttributeValue, ingestBufferCapacityAttributeValue, ingestTaskIDAttributeValue)
+}
+
+// RecordApachedruidIngestEventsDuplicateDataPoint adds a data point to apachedruid.ingest.events.duplicate metric.
+func (mb *MetricsBuilder) RecordApachedruidIngestEventsDuplicateDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + mb.metricApachedruidIngestEventsDuplicate.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) +} + +// RecordApachedruidIngestEventsMessageGapDataPoint adds a data point to apachedruid.ingest.events.message_gap metric. +func (mb *MetricsBuilder) RecordApachedruidIngestEventsMessageGapDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + mb.metricApachedruidIngestEventsMessageGap.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) +} + +// RecordApachedruidIngestEventsProcessedDataPoint adds a data point to apachedruid.ingest.events.processed metric. +func (mb *MetricsBuilder) RecordApachedruidIngestEventsProcessedDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + mb.metricApachedruidIngestEventsProcessed.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) +} + +// RecordApachedruidIngestEventsProcessedWithErrorDataPoint adds a data point to apachedruid.ingest.events.processed_with_error metric. +func (mb *MetricsBuilder) RecordApachedruidIngestEventsProcessedWithErrorDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + mb.metricApachedruidIngestEventsProcessedWithError.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) +} + +// RecordApachedruidIngestEventsThrownAwayDataPoint adds a data point to apachedruid.ingest.events.thrown_away metric. +func (mb *MetricsBuilder) RecordApachedruidIngestEventsThrownAwayDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + mb.metricApachedruidIngestEventsThrownAway.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) +} + +// RecordApachedruidIngestEventsUnparseableDataPoint adds a data point to apachedruid.ingest.events.unparseable metric. 
+func (mb *MetricsBuilder) RecordApachedruidIngestEventsUnparseableDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + mb.metricApachedruidIngestEventsUnparseable.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) +} + +// RecordApachedruidIngestHandoffCountDataPoint adds a data point to apachedruid.ingest.handoff.count metric. +func (mb *MetricsBuilder) RecordApachedruidIngestHandoffCountDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + mb.metricApachedruidIngestHandoffCount.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) +} + +// RecordApachedruidIngestHandoffFailedDataPoint adds a data point to apachedruid.ingest.handoff.failed metric. +func (mb *MetricsBuilder) RecordApachedruidIngestHandoffFailedDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + mb.metricApachedruidIngestHandoffFailed.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) +} + +// RecordApachedruidIngestHandoffTimeDataPoint adds a data point to apachedruid.ingest.handoff.time metric. +func (mb *MetricsBuilder) RecordApachedruidIngestHandoffTimeDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + mb.metricApachedruidIngestHandoffTime.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) +} + +// RecordApachedruidIngestInputBytesDataPoint adds a data point to apachedruid.ingest.input.bytes metric. +func (mb *MetricsBuilder) RecordApachedruidIngestInputBytesDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + mb.metricApachedruidIngestInputBytes.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) +} + +// RecordApachedruidIngestKafkaAvgLagDataPoint adds a data point to apachedruid.ingest.kafka.avg_lag metric. 
+func (mb *MetricsBuilder) RecordApachedruidIngestKafkaAvgLagDataPoint(ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestStreamAttributeValue string, ingestDataSourceAttributeValue string) { + mb.metricApachedruidIngestKafkaAvgLag.recordDataPoint(mb.startTime, ts, val, ingestTagsAttributeValue, ingestStreamAttributeValue, ingestDataSourceAttributeValue) +} + +// RecordApachedruidIngestKafkaLagDataPoint adds a data point to apachedruid.ingest.kafka.lag metric. +func (mb *MetricsBuilder) RecordApachedruidIngestKafkaLagDataPoint(ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestStreamAttributeValue string, ingestDataSourceAttributeValue string) { + mb.metricApachedruidIngestKafkaLag.recordDataPoint(mb.startTime, ts, val, ingestTagsAttributeValue, ingestStreamAttributeValue, ingestDataSourceAttributeValue) +} + +// RecordApachedruidIngestKafkaMaxLagDataPoint adds a data point to apachedruid.ingest.kafka.max_lag metric. +func (mb *MetricsBuilder) RecordApachedruidIngestKafkaMaxLagDataPoint(ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestStreamAttributeValue string, ingestDataSourceAttributeValue string) { + mb.metricApachedruidIngestKafkaMaxLag.recordDataPoint(mb.startTime, ts, val, ingestTagsAttributeValue, ingestStreamAttributeValue, ingestDataSourceAttributeValue) +} + +// RecordApachedruidIngestKafkaPartitionLagDataPoint adds a data point to apachedruid.ingest.kafka.partition_lag metric. +func (mb *MetricsBuilder) RecordApachedruidIngestKafkaPartitionLagDataPoint(ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestPartitionAttributeValue string, ingestStreamAttributeValue string, ingestDataSourceAttributeValue string) { + mb.metricApachedruidIngestKafkaPartitionLag.recordDataPoint(mb.startTime, ts, val, ingestTagsAttributeValue, ingestPartitionAttributeValue, ingestStreamAttributeValue, ingestDataSourceAttributeValue) +} + +// RecordApachedruidIngestKinesisAvgLagTimeDataPoint adds a data point to apachedruid.ingest.kinesis.avg_lag.time metric. +func (mb *MetricsBuilder) RecordApachedruidIngestKinesisAvgLagTimeDataPoint(ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestStreamAttributeValue string, ingestDataSourceAttributeValue string) { + mb.metricApachedruidIngestKinesisAvgLagTime.recordDataPoint(mb.startTime, ts, val, ingestTagsAttributeValue, ingestStreamAttributeValue, ingestDataSourceAttributeValue) +} + +// RecordApachedruidIngestKinesisLagTimeDataPoint adds a data point to apachedruid.ingest.kinesis.lag.time metric. +func (mb *MetricsBuilder) RecordApachedruidIngestKinesisLagTimeDataPoint(ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestStreamAttributeValue string, ingestDataSourceAttributeValue string) { + mb.metricApachedruidIngestKinesisLagTime.recordDataPoint(mb.startTime, ts, val, ingestTagsAttributeValue, ingestStreamAttributeValue, ingestDataSourceAttributeValue) +} + +// RecordApachedruidIngestKinesisMaxLagTimeDataPoint adds a data point to apachedruid.ingest.kinesis.max_lag.time metric. 
+func (mb *MetricsBuilder) RecordApachedruidIngestKinesisMaxLagTimeDataPoint(ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestStreamAttributeValue string, ingestDataSourceAttributeValue string) { + mb.metricApachedruidIngestKinesisMaxLagTime.recordDataPoint(mb.startTime, ts, val, ingestTagsAttributeValue, ingestStreamAttributeValue, ingestDataSourceAttributeValue) +} + +// RecordApachedruidIngestKinesisPartitionLagTimeDataPoint adds a data point to apachedruid.ingest.kinesis.partition_lag.time metric. +func (mb *MetricsBuilder) RecordApachedruidIngestKinesisPartitionLagTimeDataPoint(ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestPartitionAttributeValue string, ingestStreamAttributeValue string, ingestDataSourceAttributeValue string) { + mb.metricApachedruidIngestKinesisPartitionLagTime.recordDataPoint(mb.startTime, ts, val, ingestTagsAttributeValue, ingestPartitionAttributeValue, ingestStreamAttributeValue, ingestDataSourceAttributeValue) +} + +// RecordApachedruidIngestMergeCPUDataPoint adds a data point to apachedruid.ingest.merge.cpu metric. +func (mb *MetricsBuilder) RecordApachedruidIngestMergeCPUDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + mb.metricApachedruidIngestMergeCPU.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) +} + +// RecordApachedruidIngestMergeTimeDataPoint adds a data point to apachedruid.ingest.merge.time metric. +func (mb *MetricsBuilder) RecordApachedruidIngestMergeTimeDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + mb.metricApachedruidIngestMergeTime.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) +} + +// RecordApachedruidIngestNoticesQueueSizeDataPoint adds a data point to apachedruid.ingest.notices.queue_size metric. +func (mb *MetricsBuilder) RecordApachedruidIngestNoticesQueueSizeDataPoint(ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestDataSourceAttributeValue string) { + mb.metricApachedruidIngestNoticesQueueSize.recordDataPoint(mb.startTime, ts, val, ingestTagsAttributeValue, ingestDataSourceAttributeValue) +} + +// RecordApachedruidIngestNoticesTimeDataPoint adds a data point to apachedruid.ingest.notices.time metric. +func (mb *MetricsBuilder) RecordApachedruidIngestNoticesTimeDataPoint(ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestDataSourceAttributeValue string) { + mb.metricApachedruidIngestNoticesTime.recordDataPoint(mb.startTime, ts, val, ingestTagsAttributeValue, ingestDataSourceAttributeValue) +} + +// RecordApachedruidIngestPauseTimeDataPoint adds a data point to apachedruid.ingest.pause.time metric. 
+func (mb *MetricsBuilder) RecordApachedruidIngestPauseTimeDataPoint(ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string, ingestDataSourceAttributeValue string) { + mb.metricApachedruidIngestPauseTime.recordDataPoint(mb.startTime, ts, val, ingestTagsAttributeValue, ingestTaskIDAttributeValue, ingestDataSourceAttributeValue) +} + +// RecordApachedruidIngestPersistsBackPressureDataPoint adds a data point to apachedruid.ingest.persists.back_pressure metric. +func (mb *MetricsBuilder) RecordApachedruidIngestPersistsBackPressureDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + mb.metricApachedruidIngestPersistsBackPressure.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) +} + +// RecordApachedruidIngestPersistsCountDataPoint adds a data point to apachedruid.ingest.persists.count metric. +func (mb *MetricsBuilder) RecordApachedruidIngestPersistsCountDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + mb.metricApachedruidIngestPersistsCount.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) +} + +// RecordApachedruidIngestPersistsCPUDataPoint adds a data point to apachedruid.ingest.persists.cpu metric. +func (mb *MetricsBuilder) RecordApachedruidIngestPersistsCPUDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + mb.metricApachedruidIngestPersistsCPU.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) +} + +// RecordApachedruidIngestPersistsFailedDataPoint adds a data point to apachedruid.ingest.persists.failed metric. +func (mb *MetricsBuilder) RecordApachedruidIngestPersistsFailedDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + mb.metricApachedruidIngestPersistsFailed.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) +} + +// RecordApachedruidIngestPersistsTimeDataPoint adds a data point to apachedruid.ingest.persists.time metric. 
+func (mb *MetricsBuilder) RecordApachedruidIngestPersistsTimeDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + mb.metricApachedruidIngestPersistsTime.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) +} + +// RecordApachedruidIngestRowsOutputDataPoint adds a data point to apachedruid.ingest.rows.output metric. +func (mb *MetricsBuilder) RecordApachedruidIngestRowsOutputDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestTaskIDAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string) { + mb.metricApachedruidIngestRowsOutput.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestTaskIDAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue) +} + +// RecordApachedruidIngestSegmentsCountDataPoint adds a data point to apachedruid.ingest.segments.count metric. +func (mb *MetricsBuilder) RecordApachedruidIngestSegmentsCountDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string, ingestTaskIngestionModeAttributeValue string) { + mb.metricApachedruidIngestSegmentsCount.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue, ingestTaskIngestionModeAttributeValue) +} + +// RecordApachedruidIngestShuffleBytesDataPoint adds a data point to apachedruid.ingest.shuffle.bytes metric. +func (mb *MetricsBuilder) RecordApachedruidIngestShuffleBytesDataPoint(ts pcommon.Timestamp, val int64, ingestSupervisorTaskIDAttributeValue string) { + mb.metricApachedruidIngestShuffleBytes.recordDataPoint(mb.startTime, ts, val, ingestSupervisorTaskIDAttributeValue) +} + +// RecordApachedruidIngestShuffleRequestsDataPoint adds a data point to apachedruid.ingest.shuffle.requests metric. +func (mb *MetricsBuilder) RecordApachedruidIngestShuffleRequestsDataPoint(ts pcommon.Timestamp, val int64, ingestSupervisorTaskIDAttributeValue string) { + mb.metricApachedruidIngestShuffleRequests.recordDataPoint(mb.startTime, ts, val, ingestSupervisorTaskIDAttributeValue) +} + +// RecordApachedruidIngestSinkCountDataPoint adds a data point to apachedruid.ingest.sink.count metric. +func (mb *MetricsBuilder) RecordApachedruidIngestSinkCountDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { + mb.metricApachedruidIngestSinkCount.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) +} + +// RecordApachedruidIngestTombstonesCountDataPoint adds a data point to apachedruid.ingest.tombstones.count metric. 
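// NOTE (editorial illustration, not part of the generated file or of this patch):
// the ingest Record* helpers above all follow the same shape - timestamp, value,
// then one string per attribute in the order listed in the signature. A
// hypothetical call for a single Druid "ingest/rows/output" event might look like
// this; every literal stands in for a field parsed from the emitted event.
//
//	mb.RecordApachedruidIngestRowsOutputDataPoint(
//		now,                         // ts
//		42,                          // val: rows pushed
//		"index_kafka",               // ingest task_type
//		"index_kafka_wikipedia_abc", // ingest task_id
//		"wikipedia",                 // ingest data_source
//		"index_kafka_wikipedia",     // ingest group_id
//	)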
+func (mb *MetricsBuilder) RecordApachedruidIngestTombstonesCountDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string, ingestTaskIngestionModeAttributeValue string) { + mb.metricApachedruidIngestTombstonesCount.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue, ingestTaskIngestionModeAttributeValue) +} + +// RecordApachedruidIntervalCompactedCountDataPoint adds a data point to apachedruid.interval.compacted.count metric. +func (mb *MetricsBuilder) RecordApachedruidIntervalCompactedCountDataPoint(ts pcommon.Timestamp, val int64, intervalDataSourceAttributeValue string) { + mb.metricApachedruidIntervalCompactedCount.recordDataPoint(mb.startTime, ts, val, intervalDataSourceAttributeValue) +} + +// RecordApachedruidIntervalSkipCompactCountDataPoint adds a data point to apachedruid.interval.skip_compact.count metric. +func (mb *MetricsBuilder) RecordApachedruidIntervalSkipCompactCountDataPoint(ts pcommon.Timestamp, val int64, intervalDataSourceAttributeValue string) { + mb.metricApachedruidIntervalSkipCompactCount.recordDataPoint(mb.startTime, ts, val, intervalDataSourceAttributeValue) +} + +// RecordApachedruidIntervalWaitCompactCountDataPoint adds a data point to apachedruid.interval.wait_compact.count metric. +func (mb *MetricsBuilder) RecordApachedruidIntervalWaitCompactCountDataPoint(ts pcommon.Timestamp, val int64, intervalDataSourceAttributeValue string) { + mb.metricApachedruidIntervalWaitCompactCount.recordDataPoint(mb.startTime, ts, val, intervalDataSourceAttributeValue) +} + +// RecordApachedruidJettyNumOpenConnectionsDataPoint adds a data point to apachedruid.jetty.num_open_connections metric. +func (mb *MetricsBuilder) RecordApachedruidJettyNumOpenConnectionsDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidJettyNumOpenConnections.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidJettyThreadPoolBusyDataPoint adds a data point to apachedruid.jetty.thread_pool.busy metric. +func (mb *MetricsBuilder) RecordApachedruidJettyThreadPoolBusyDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidJettyThreadPoolBusy.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidJettyThreadPoolIdleDataPoint adds a data point to apachedruid.jetty.thread_pool.idle metric. +func (mb *MetricsBuilder) RecordApachedruidJettyThreadPoolIdleDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidJettyThreadPoolIdle.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidJettyThreadPoolIsLowOnThreadsDataPoint adds a data point to apachedruid.jetty.thread_pool.is_low_on_threads metric. +func (mb *MetricsBuilder) RecordApachedruidJettyThreadPoolIsLowOnThreadsDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidJettyThreadPoolIsLowOnThreads.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidJettyThreadPoolMaxDataPoint adds a data point to apachedruid.jetty.thread_pool.max metric. +func (mb *MetricsBuilder) RecordApachedruidJettyThreadPoolMaxDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidJettyThreadPoolMax.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidJettyThreadPoolMinDataPoint adds a data point to apachedruid.jetty.thread_pool.min metric. 
+func (mb *MetricsBuilder) RecordApachedruidJettyThreadPoolMinDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidJettyThreadPoolMin.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidJettyThreadPoolQueueSizeDataPoint adds a data point to apachedruid.jetty.thread_pool.queue_size metric. +func (mb *MetricsBuilder) RecordApachedruidJettyThreadPoolQueueSizeDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidJettyThreadPoolQueueSize.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidJettyThreadPoolTotalDataPoint adds a data point to apachedruid.jetty.thread_pool.total metric. +func (mb *MetricsBuilder) RecordApachedruidJettyThreadPoolTotalDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidJettyThreadPoolTotal.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidJvmBufferpoolCapacityDataPoint adds a data point to apachedruid.jvm.bufferpool.capacity metric. +func (mb *MetricsBuilder) RecordApachedruidJvmBufferpoolCapacityDataPoint(ts pcommon.Timestamp, val int64, jvmBufferpoolNameAttributeValue string) { + mb.metricApachedruidJvmBufferpoolCapacity.recordDataPoint(mb.startTime, ts, val, jvmBufferpoolNameAttributeValue) +} + +// RecordApachedruidJvmBufferpoolCountDataPoint adds a data point to apachedruid.jvm.bufferpool.count metric. +func (mb *MetricsBuilder) RecordApachedruidJvmBufferpoolCountDataPoint(ts pcommon.Timestamp, val int64, jvmBufferpoolNameAttributeValue string) { + mb.metricApachedruidJvmBufferpoolCount.recordDataPoint(mb.startTime, ts, val, jvmBufferpoolNameAttributeValue) +} + +// RecordApachedruidJvmBufferpoolUsedDataPoint adds a data point to apachedruid.jvm.bufferpool.used metric. +func (mb *MetricsBuilder) RecordApachedruidJvmBufferpoolUsedDataPoint(ts pcommon.Timestamp, val int64, jvmBufferpoolNameAttributeValue string) { + mb.metricApachedruidJvmBufferpoolUsed.recordDataPoint(mb.startTime, ts, val, jvmBufferpoolNameAttributeValue) +} + +// RecordApachedruidJvmGcCountDataPoint adds a data point to apachedruid.jvm.gc.count metric. +func (mb *MetricsBuilder) RecordApachedruidJvmGcCountDataPoint(ts pcommon.Timestamp, val int64, jvmGcGenAttributeValue string, jvmGcNameAttributeValue string) { + mb.metricApachedruidJvmGcCount.recordDataPoint(mb.startTime, ts, val, jvmGcGenAttributeValue, jvmGcNameAttributeValue) +} + +// RecordApachedruidJvmGcCPUDataPoint adds a data point to apachedruid.jvm.gc.cpu metric. +func (mb *MetricsBuilder) RecordApachedruidJvmGcCPUDataPoint(ts pcommon.Timestamp, val int64, jvmGcGenAttributeValue string, jvmGcNameAttributeValue string) { + mb.metricApachedruidJvmGcCPU.recordDataPoint(mb.startTime, ts, val, jvmGcGenAttributeValue, jvmGcNameAttributeValue) +} + +// RecordApachedruidJvmMemCommittedDataPoint adds a data point to apachedruid.jvm.mem.committed metric. +func (mb *MetricsBuilder) RecordApachedruidJvmMemCommittedDataPoint(ts pcommon.Timestamp, val int64, jvmMemKindAttributeValue string) { + mb.metricApachedruidJvmMemCommitted.recordDataPoint(mb.startTime, ts, val, jvmMemKindAttributeValue) +} + +// RecordApachedruidJvmMemInitDataPoint adds a data point to apachedruid.jvm.mem.init metric. +func (mb *MetricsBuilder) RecordApachedruidJvmMemInitDataPoint(ts pcommon.Timestamp, val int64, jvmMemKindAttributeValue string) { + mb.metricApachedruidJvmMemInit.recordDataPoint(mb.startTime, ts, val, jvmMemKindAttributeValue) +} + +// RecordApachedruidJvmMemMaxDataPoint adds a data point to apachedruid.jvm.mem.max metric. 
+func (mb *MetricsBuilder) RecordApachedruidJvmMemMaxDataPoint(ts pcommon.Timestamp, val int64, jvmMemKindAttributeValue string) { + mb.metricApachedruidJvmMemMax.recordDataPoint(mb.startTime, ts, val, jvmMemKindAttributeValue) +} + +// RecordApachedruidJvmMemUsedDataPoint adds a data point to apachedruid.jvm.mem.used metric. +func (mb *MetricsBuilder) RecordApachedruidJvmMemUsedDataPoint(ts pcommon.Timestamp, val int64, jvmMemKindAttributeValue string) { + mb.metricApachedruidJvmMemUsed.recordDataPoint(mb.startTime, ts, val, jvmMemKindAttributeValue) +} + +// RecordApachedruidJvmPoolCommittedDataPoint adds a data point to apachedruid.jvm.pool.committed metric. +func (mb *MetricsBuilder) RecordApachedruidJvmPoolCommittedDataPoint(ts pcommon.Timestamp, val int64, jvmPoolNameAttributeValue string, jvmPoolKindAttributeValue string) { + mb.metricApachedruidJvmPoolCommitted.recordDataPoint(mb.startTime, ts, val, jvmPoolNameAttributeValue, jvmPoolKindAttributeValue) +} + +// RecordApachedruidJvmPoolInitDataPoint adds a data point to apachedruid.jvm.pool.init metric. +func (mb *MetricsBuilder) RecordApachedruidJvmPoolInitDataPoint(ts pcommon.Timestamp, val int64, jvmPoolNameAttributeValue string, jvmPoolKindAttributeValue string) { + mb.metricApachedruidJvmPoolInit.recordDataPoint(mb.startTime, ts, val, jvmPoolNameAttributeValue, jvmPoolKindAttributeValue) +} + +// RecordApachedruidJvmPoolMaxDataPoint adds a data point to apachedruid.jvm.pool.max metric. +func (mb *MetricsBuilder) RecordApachedruidJvmPoolMaxDataPoint(ts pcommon.Timestamp, val int64, jvmPoolNameAttributeValue string, jvmPoolKindAttributeValue string) { + mb.metricApachedruidJvmPoolMax.recordDataPoint(mb.startTime, ts, val, jvmPoolNameAttributeValue, jvmPoolKindAttributeValue) +} + +// RecordApachedruidJvmPoolUsedDataPoint adds a data point to apachedruid.jvm.pool.used metric. +func (mb *MetricsBuilder) RecordApachedruidJvmPoolUsedDataPoint(ts pcommon.Timestamp, val int64, jvmPoolNameAttributeValue string, jvmPoolKindAttributeValue string) { + mb.metricApachedruidJvmPoolUsed.recordDataPoint(mb.startTime, ts, val, jvmPoolNameAttributeValue, jvmPoolKindAttributeValue) +} + +// RecordApachedruidKillPendingSegmentsCountDataPoint adds a data point to apachedruid.kill.pending_segments.count metric. +func (mb *MetricsBuilder) RecordApachedruidKillPendingSegmentsCountDataPoint(ts pcommon.Timestamp, val int64, killDataSourceAttributeValue string) { + mb.metricApachedruidKillPendingSegmentsCount.recordDataPoint(mb.startTime, ts, val, killDataSourceAttributeValue) +} + +// RecordApachedruidKillTaskCountDataPoint adds a data point to apachedruid.kill.task.count metric. +func (mb *MetricsBuilder) RecordApachedruidKillTaskCountDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidKillTaskCount.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidKillTaskAvailableSlotCountDataPoint adds a data point to apachedruid.kill_task.available_slot.count metric. +func (mb *MetricsBuilder) RecordApachedruidKillTaskAvailableSlotCountDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidKillTaskAvailableSlotCount.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidKillTaskMaxSlotCountDataPoint adds a data point to apachedruid.kill_task.max_slot.count metric. 
+func (mb *MetricsBuilder) RecordApachedruidKillTaskMaxSlotCountDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidKillTaskMaxSlotCount.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidMergeBufferPendingRequestsDataPoint adds a data point to apachedruid.merge_buffer.pending_requests metric. +func (mb *MetricsBuilder) RecordApachedruidMergeBufferPendingRequestsDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidMergeBufferPendingRequests.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidMetadataKillAuditCountDataPoint adds a data point to apachedruid.metadata.kill.audit.count metric. +func (mb *MetricsBuilder) RecordApachedruidMetadataKillAuditCountDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidMetadataKillAuditCount.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidMetadataKillCompactionCountDataPoint adds a data point to apachedruid.metadata.kill.compaction.count metric. +func (mb *MetricsBuilder) RecordApachedruidMetadataKillCompactionCountDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidMetadataKillCompactionCount.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidMetadataKillDatasourceCountDataPoint adds a data point to apachedruid.metadata.kill.datasource.count metric. +func (mb *MetricsBuilder) RecordApachedruidMetadataKillDatasourceCountDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidMetadataKillDatasourceCount.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidMetadataKillRuleCountDataPoint adds a data point to apachedruid.metadata.kill.rule.count metric. +func (mb *MetricsBuilder) RecordApachedruidMetadataKillRuleCountDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidMetadataKillRuleCount.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidMetadataKillSupervisorCountDataPoint adds a data point to apachedruid.metadata.kill.supervisor.count metric. +func (mb *MetricsBuilder) RecordApachedruidMetadataKillSupervisorCountDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidMetadataKillSupervisorCount.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidMetadatacacheInitTimeDataPoint adds a data point to apachedruid.metadatacache.init.time metric. +func (mb *MetricsBuilder) RecordApachedruidMetadatacacheInitTimeDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidMetadatacacheInitTime.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidMetadatacacheRefreshCountDataPoint adds a data point to apachedruid.metadatacache.refresh.count metric. +func (mb *MetricsBuilder) RecordApachedruidMetadatacacheRefreshCountDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidMetadatacacheRefreshCount.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidMetadatacacheRefreshTimeDataPoint adds a data point to apachedruid.metadatacache.refresh.time metric. +func (mb *MetricsBuilder) RecordApachedruidMetadatacacheRefreshTimeDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidMetadatacacheRefreshTime.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryByteLimitExceededCountDataPoint adds a data point to apachedruid.query.byte_limit.exceeded.count metric. 
+func (mb *MetricsBuilder) RecordApachedruidQueryByteLimitExceededCountDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidQueryByteLimitExceededCount.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryBytesDataPoint adds a data point to apachedruid.query.bytes metric. +func (mb *MetricsBuilder) RecordApachedruidQueryBytesDataPoint(ts pcommon.Timestamp, val int64, queryDataSourceAttributeValue string, queryNumMetricsAttributeValue string, queryDimensionAttributeValue string, queryHasFiltersAttributeValue string, queryThresholdAttributeValue int64, queryNumComplexMetricsAttributeValue int64, queryTypeAttributeValue string, queryRemoteAddressAttributeValue string, queryIDAttributeValue string, queryContextAttributeValue string, queryNumDimensionsAttributeValue string, queryIntervalAttributeValue string, queryDurationAttributeValue string) { + mb.metricApachedruidQueryBytes.recordDataPoint(mb.startTime, ts, val, queryDataSourceAttributeValue, queryNumMetricsAttributeValue, queryDimensionAttributeValue, queryHasFiltersAttributeValue, queryThresholdAttributeValue, queryNumComplexMetricsAttributeValue, queryTypeAttributeValue, queryRemoteAddressAttributeValue, queryIDAttributeValue, queryContextAttributeValue, queryNumDimensionsAttributeValue, queryIntervalAttributeValue, queryDurationAttributeValue) +} + +// RecordApachedruidQueryCacheDeltaAverageBytesDataPoint adds a data point to apachedruid.query.cache.delta.average_bytes metric. +func (mb *MetricsBuilder) RecordApachedruidQueryCacheDeltaAverageBytesDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidQueryCacheDeltaAverageBytes.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryCacheDeltaErrorsDataPoint adds a data point to apachedruid.query.cache.delta.errors metric. +func (mb *MetricsBuilder) RecordApachedruidQueryCacheDeltaErrorsDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidQueryCacheDeltaErrors.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryCacheDeltaEvictionsDataPoint adds a data point to apachedruid.query.cache.delta.evictions metric. +func (mb *MetricsBuilder) RecordApachedruidQueryCacheDeltaEvictionsDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidQueryCacheDeltaEvictions.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryCacheDeltaHitRateDataPoint adds a data point to apachedruid.query.cache.delta.hit_rate metric. +func (mb *MetricsBuilder) RecordApachedruidQueryCacheDeltaHitRateDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricApachedruidQueryCacheDeltaHitRate.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryCacheDeltaHitsDataPoint adds a data point to apachedruid.query.cache.delta.hits metric. +func (mb *MetricsBuilder) RecordApachedruidQueryCacheDeltaHitsDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidQueryCacheDeltaHits.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryCacheDeltaMissesDataPoint adds a data point to apachedruid.query.cache.delta.misses metric. +func (mb *MetricsBuilder) RecordApachedruidQueryCacheDeltaMissesDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidQueryCacheDeltaMisses.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryCacheDeltaNumEntriesDataPoint adds a data point to apachedruid.query.cache.delta.num_entries metric. 
+func (mb *MetricsBuilder) RecordApachedruidQueryCacheDeltaNumEntriesDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidQueryCacheDeltaNumEntries.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryCacheDeltaPutErrorDataPoint adds a data point to apachedruid.query.cache.delta.put.error metric. +func (mb *MetricsBuilder) RecordApachedruidQueryCacheDeltaPutErrorDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidQueryCacheDeltaPutError.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryCacheDeltaPutOkDataPoint adds a data point to apachedruid.query.cache.delta.put.ok metric. +func (mb *MetricsBuilder) RecordApachedruidQueryCacheDeltaPutOkDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidQueryCacheDeltaPutOk.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryCacheDeltaPutOversizedDataPoint adds a data point to apachedruid.query.cache.delta.put.oversized metric. +func (mb *MetricsBuilder) RecordApachedruidQueryCacheDeltaPutOversizedDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidQueryCacheDeltaPutOversized.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryCacheDeltaSizeBytesDataPoint adds a data point to apachedruid.query.cache.delta.size_bytes metric. +func (mb *MetricsBuilder) RecordApachedruidQueryCacheDeltaSizeBytesDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidQueryCacheDeltaSizeBytes.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryCacheDeltaTimeoutsDataPoint adds a data point to apachedruid.query.cache.delta.timeouts metric. +func (mb *MetricsBuilder) RecordApachedruidQueryCacheDeltaTimeoutsDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidQueryCacheDeltaTimeouts.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryCacheMemcachedDeltaDataPoint adds a data point to apachedruid.query.cache.memcached.delta metric. +func (mb *MetricsBuilder) RecordApachedruidQueryCacheMemcachedDeltaDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidQueryCacheMemcachedDelta.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryCacheMemcachedTotalDataPoint adds a data point to apachedruid.query.cache.memcached.total metric. +func (mb *MetricsBuilder) RecordApachedruidQueryCacheMemcachedTotalDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidQueryCacheMemcachedTotal.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryCacheTotalAverageBytesDataPoint adds a data point to apachedruid.query.cache.total.average_bytes metric. +func (mb *MetricsBuilder) RecordApachedruidQueryCacheTotalAverageBytesDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidQueryCacheTotalAverageBytes.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryCacheTotalErrorsDataPoint adds a data point to apachedruid.query.cache.total.errors metric. +func (mb *MetricsBuilder) RecordApachedruidQueryCacheTotalErrorsDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidQueryCacheTotalErrors.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryCacheTotalEvictionsDataPoint adds a data point to apachedruid.query.cache.total.evictions metric. 
+func (mb *MetricsBuilder) RecordApachedruidQueryCacheTotalEvictionsDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidQueryCacheTotalEvictions.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryCacheTotalHitRateDataPoint adds a data point to apachedruid.query.cache.total.hit_rate metric. +func (mb *MetricsBuilder) RecordApachedruidQueryCacheTotalHitRateDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricApachedruidQueryCacheTotalHitRate.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryCacheTotalHitsDataPoint adds a data point to apachedruid.query.cache.total.hits metric. +func (mb *MetricsBuilder) RecordApachedruidQueryCacheTotalHitsDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidQueryCacheTotalHits.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryCacheTotalMissesDataPoint adds a data point to apachedruid.query.cache.total.misses metric. +func (mb *MetricsBuilder) RecordApachedruidQueryCacheTotalMissesDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidQueryCacheTotalMisses.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryCacheTotalNumEntriesDataPoint adds a data point to apachedruid.query.cache.total.num_entries metric. +func (mb *MetricsBuilder) RecordApachedruidQueryCacheTotalNumEntriesDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidQueryCacheTotalNumEntries.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryCacheTotalPutErrorDataPoint adds a data point to apachedruid.query.cache.total.put.error metric. +func (mb *MetricsBuilder) RecordApachedruidQueryCacheTotalPutErrorDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidQueryCacheTotalPutError.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryCacheTotalPutOkDataPoint adds a data point to apachedruid.query.cache.total.put.ok metric. +func (mb *MetricsBuilder) RecordApachedruidQueryCacheTotalPutOkDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidQueryCacheTotalPutOk.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryCacheTotalPutOversizedDataPoint adds a data point to apachedruid.query.cache.total.put.oversized metric. +func (mb *MetricsBuilder) RecordApachedruidQueryCacheTotalPutOversizedDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidQueryCacheTotalPutOversized.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryCacheTotalSizeBytesDataPoint adds a data point to apachedruid.query.cache.total.size_bytes metric. +func (mb *MetricsBuilder) RecordApachedruidQueryCacheTotalSizeBytesDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidQueryCacheTotalSizeBytes.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryCacheTotalTimeoutsDataPoint adds a data point to apachedruid.query.cache.total.timeouts metric. +func (mb *MetricsBuilder) RecordApachedruidQueryCacheTotalTimeoutsDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidQueryCacheTotalTimeouts.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryCountDataPoint adds a data point to apachedruid.query.count metric. +func (mb *MetricsBuilder) RecordApachedruidQueryCountDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidQueryCount.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryCPUTimeDataPoint adds a data point to apachedruid.query.cpu.time metric. 
+func (mb *MetricsBuilder) RecordApachedruidQueryCPUTimeDataPoint(ts pcommon.Timestamp, val int64, queryDataSourceAttributeValue string, queryNumMetricsAttributeValue string, queryDimensionAttributeValue string, queryHasFiltersAttributeValue string, queryThresholdAttributeValue int64, queryNumComplexMetricsAttributeValue int64, queryTypeAttributeValue string, queryRemoteAddressAttributeValue string, queryIDAttributeValue string, queryContextAttributeValue string, queryNumDimensionsAttributeValue string, queryIntervalAttributeValue string, queryDurationAttributeValue string) { + mb.metricApachedruidQueryCPUTime.recordDataPoint(mb.startTime, ts, val, queryDataSourceAttributeValue, queryNumMetricsAttributeValue, queryDimensionAttributeValue, queryHasFiltersAttributeValue, queryThresholdAttributeValue, queryNumComplexMetricsAttributeValue, queryTypeAttributeValue, queryRemoteAddressAttributeValue, queryIDAttributeValue, queryContextAttributeValue, queryNumDimensionsAttributeValue, queryIntervalAttributeValue, queryDurationAttributeValue) +} + +// RecordApachedruidQueryFailedCountDataPoint adds a data point to apachedruid.query.failed.count metric. +func (mb *MetricsBuilder) RecordApachedruidQueryFailedCountDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidQueryFailedCount.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryInterruptedCountDataPoint adds a data point to apachedruid.query.interrupted.count metric. +func (mb *MetricsBuilder) RecordApachedruidQueryInterruptedCountDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidQueryInterruptedCount.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryNodeBackpressureDataPoint adds a data point to apachedruid.query.node.backpressure metric. +func (mb *MetricsBuilder) RecordApachedruidQueryNodeBackpressureDataPoint(ts pcommon.Timestamp, val int64, queryStatusAttributeValue string, queryServerAttributeValue string, queryIDAttributeValue string) { + mb.metricApachedruidQueryNodeBackpressure.recordDataPoint(mb.startTime, ts, val, queryStatusAttributeValue, queryServerAttributeValue, queryIDAttributeValue) +} + +// RecordApachedruidQueryNodeBytesDataPoint adds a data point to apachedruid.query.node.bytes metric. +func (mb *MetricsBuilder) RecordApachedruidQueryNodeBytesDataPoint(ts pcommon.Timestamp, val int64, queryStatusAttributeValue string, queryServerAttributeValue string, queryIDAttributeValue string) { + mb.metricApachedruidQueryNodeBytes.recordDataPoint(mb.startTime, ts, val, queryStatusAttributeValue, queryServerAttributeValue, queryIDAttributeValue) +} + +// RecordApachedruidQueryNodeTimeDataPoint adds a data point to apachedruid.query.node.time metric. +func (mb *MetricsBuilder) RecordApachedruidQueryNodeTimeDataPoint(ts pcommon.Timestamp, val int64, queryStatusAttributeValue string, queryServerAttributeValue string, queryIDAttributeValue string) { + mb.metricApachedruidQueryNodeTime.recordDataPoint(mb.startTime, ts, val, queryStatusAttributeValue, queryServerAttributeValue, queryIDAttributeValue) +} + +// RecordApachedruidQueryNodeTtfbDataPoint adds a data point to apachedruid.query.node.ttfb metric. 
+func (mb *MetricsBuilder) RecordApachedruidQueryNodeTtfbDataPoint(ts pcommon.Timestamp, val int64, queryStatusAttributeValue string, queryServerAttributeValue string, queryIDAttributeValue string) { + mb.metricApachedruidQueryNodeTtfb.recordDataPoint(mb.startTime, ts, val, queryStatusAttributeValue, queryServerAttributeValue, queryIDAttributeValue) +} + +// RecordApachedruidQueryPriorityDataPoint adds a data point to apachedruid.query.priority metric. +func (mb *MetricsBuilder) RecordApachedruidQueryPriorityDataPoint(ts pcommon.Timestamp, val int64, queryTypeAttributeValue string, queryDataSourceAttributeValue string, queryLaneAttributeValue string) { + mb.metricApachedruidQueryPriority.recordDataPoint(mb.startTime, ts, val, queryTypeAttributeValue, queryDataSourceAttributeValue, queryLaneAttributeValue) +} + +// RecordApachedruidQueryRowLimitExceededCountDataPoint adds a data point to apachedruid.query.row_limit.exceeded.count metric. +func (mb *MetricsBuilder) RecordApachedruidQueryRowLimitExceededCountDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidQueryRowLimitExceededCount.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQuerySegmentTimeDataPoint adds a data point to apachedruid.query.segment.time metric. +func (mb *MetricsBuilder) RecordApachedruidQuerySegmentTimeDataPoint(ts pcommon.Timestamp, val int64, queryStatusAttributeValue string, querySegmentAttributeValue string, queryIDAttributeValue string, queryVectorizedAttributeValue string) { + mb.metricApachedruidQuerySegmentTime.recordDataPoint(mb.startTime, ts, val, queryStatusAttributeValue, querySegmentAttributeValue, queryIDAttributeValue, queryVectorizedAttributeValue) +} + +// RecordApachedruidQuerySegmentAndCacheTimeDataPoint adds a data point to apachedruid.query.segment_and_cache.time metric. +func (mb *MetricsBuilder) RecordApachedruidQuerySegmentAndCacheTimeDataPoint(ts pcommon.Timestamp, val int64, querySegmentAttributeValue string, queryIDAttributeValue string) { + mb.metricApachedruidQuerySegmentAndCacheTime.recordDataPoint(mb.startTime, ts, val, querySegmentAttributeValue, queryIDAttributeValue) +} + +// RecordApachedruidQuerySegmentsCountDataPoint adds a data point to apachedruid.query.segments.count metric. +func (mb *MetricsBuilder) RecordApachedruidQuerySegmentsCountDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidQuerySegmentsCount.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQuerySuccessCountDataPoint adds a data point to apachedruid.query.success.count metric. +func (mb *MetricsBuilder) RecordApachedruidQuerySuccessCountDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidQuerySuccessCount.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryTimeDataPoint adds a data point to apachedruid.query.time metric. 
+func (mb *MetricsBuilder) RecordApachedruidQueryTimeDataPoint(ts pcommon.Timestamp, val int64, queryDataSourceAttributeValue string, queryNumMetricsAttributeValue string, queryDimensionAttributeValue string, queryHasFiltersAttributeValue string, queryThresholdAttributeValue int64, queryNumComplexMetricsAttributeValue int64, queryTypeAttributeValue string, queryRemoteAddressAttributeValue string, queryIDAttributeValue string, queryContextAttributeValue string, queryNumDimensionsAttributeValue string, queryIntervalAttributeValue string, queryDurationAttributeValue string) { + mb.metricApachedruidQueryTime.recordDataPoint(mb.startTime, ts, val, queryDataSourceAttributeValue, queryNumMetricsAttributeValue, queryDimensionAttributeValue, queryHasFiltersAttributeValue, queryThresholdAttributeValue, queryNumComplexMetricsAttributeValue, queryTypeAttributeValue, queryRemoteAddressAttributeValue, queryIDAttributeValue, queryContextAttributeValue, queryNumDimensionsAttributeValue, queryIntervalAttributeValue, queryDurationAttributeValue) +} + +// RecordApachedruidQueryTimeoutCountDataPoint adds a data point to apachedruid.query.timeout.count metric. +func (mb *MetricsBuilder) RecordApachedruidQueryTimeoutCountDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidQueryTimeoutCount.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidQueryWaitTimeDataPoint adds a data point to apachedruid.query.wait.time metric. +func (mb *MetricsBuilder) RecordApachedruidQueryWaitTimeDataPoint(ts pcommon.Timestamp, val int64, querySegmentAttributeValue string, queryIDAttributeValue string) { + mb.metricApachedruidQueryWaitTime.recordDataPoint(mb.startTime, ts, val, querySegmentAttributeValue, queryIDAttributeValue) +} + +// RecordApachedruidSegmentAddedBytesDataPoint adds a data point to apachedruid.segment.added.bytes metric. +func (mb *MetricsBuilder) RecordApachedruidSegmentAddedBytesDataPoint(ts pcommon.Timestamp, val int64, segmentTaskTypeAttributeValue string, segmentDataSourceAttributeValue string, segmentGroupIDAttributeValue string, segmentTagsAttributeValue string, segmentTaskIDAttributeValue string, segmentIntervalAttributeValue string) { + mb.metricApachedruidSegmentAddedBytes.recordDataPoint(mb.startTime, ts, val, segmentTaskTypeAttributeValue, segmentDataSourceAttributeValue, segmentGroupIDAttributeValue, segmentTagsAttributeValue, segmentTaskIDAttributeValue, segmentIntervalAttributeValue) +} + +// RecordApachedruidSegmentAssignSkippedCountDataPoint adds a data point to apachedruid.segment.assign_skipped.count metric. +func (mb *MetricsBuilder) RecordApachedruidSegmentAssignSkippedCountDataPoint(ts pcommon.Timestamp, val int64, segmentDescriptionAttributeValue string, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { + mb.metricApachedruidSegmentAssignSkippedCount.recordDataPoint(mb.startTime, ts, val, segmentDescriptionAttributeValue, segmentTierAttributeValue, segmentDataSourceAttributeValue) +} + +// RecordApachedruidSegmentAssignedCountDataPoint adds a data point to apachedruid.segment.assigned.count metric. +func (mb *MetricsBuilder) RecordApachedruidSegmentAssignedCountDataPoint(ts pcommon.Timestamp, val int64, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { + mb.metricApachedruidSegmentAssignedCount.recordDataPoint(mb.startTime, ts, val, segmentTierAttributeValue, segmentDataSourceAttributeValue) +} + +// RecordApachedruidSegmentCompactedBytesDataPoint adds a data point to apachedruid.segment.compacted.bytes metric. 
+func (mb *MetricsBuilder) RecordApachedruidSegmentCompactedBytesDataPoint(ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) { + mb.metricApachedruidSegmentCompactedBytes.recordDataPoint(mb.startTime, ts, val, segmentDataSourceAttributeValue) +} + +// RecordApachedruidSegmentCompactedCountDataPoint adds a data point to apachedruid.segment.compacted.count metric. +func (mb *MetricsBuilder) RecordApachedruidSegmentCompactedCountDataPoint(ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) { + mb.metricApachedruidSegmentCompactedCount.recordDataPoint(mb.startTime, ts, val, segmentDataSourceAttributeValue) +} + +// RecordApachedruidSegmentCountDataPoint adds a data point to apachedruid.segment.count metric. +func (mb *MetricsBuilder) RecordApachedruidSegmentCountDataPoint(ts pcommon.Timestamp, val int64, segmentPriorityAttributeValue string, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { + mb.metricApachedruidSegmentCount.recordDataPoint(mb.startTime, ts, val, segmentPriorityAttributeValue, segmentTierAttributeValue, segmentDataSourceAttributeValue) +} + +// RecordApachedruidSegmentDeletedCountDataPoint adds a data point to apachedruid.segment.deleted.count metric. +func (mb *MetricsBuilder) RecordApachedruidSegmentDeletedCountDataPoint(ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) { + mb.metricApachedruidSegmentDeletedCount.recordDataPoint(mb.startTime, ts, val, segmentDataSourceAttributeValue) +} + +// RecordApachedruidSegmentDropQueueCountDataPoint adds a data point to apachedruid.segment.drop_queue.count metric. +func (mb *MetricsBuilder) RecordApachedruidSegmentDropQueueCountDataPoint(ts pcommon.Timestamp, val int64, segmentServerAttributeValue string) { + mb.metricApachedruidSegmentDropQueueCount.recordDataPoint(mb.startTime, ts, val, segmentServerAttributeValue) +} + +// RecordApachedruidSegmentDropSkippedCountDataPoint adds a data point to apachedruid.segment.drop_skipped.count metric. +func (mb *MetricsBuilder) RecordApachedruidSegmentDropSkippedCountDataPoint(ts pcommon.Timestamp, val int64, segmentDescriptionAttributeValue string, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { + mb.metricApachedruidSegmentDropSkippedCount.recordDataPoint(mb.startTime, ts, val, segmentDescriptionAttributeValue, segmentTierAttributeValue, segmentDataSourceAttributeValue) +} + +// RecordApachedruidSegmentDroppedCountDataPoint adds a data point to apachedruid.segment.dropped.count metric. +func (mb *MetricsBuilder) RecordApachedruidSegmentDroppedCountDataPoint(ts pcommon.Timestamp, val int64, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { + mb.metricApachedruidSegmentDroppedCount.recordDataPoint(mb.startTime, ts, val, segmentTierAttributeValue, segmentDataSourceAttributeValue) +} + +// RecordApachedruidSegmentLoadQueueAssignedDataPoint adds a data point to apachedruid.segment.load_queue.assigned metric. +func (mb *MetricsBuilder) RecordApachedruidSegmentLoadQueueAssignedDataPoint(ts pcommon.Timestamp, val int64, segmentServerAttributeValue string, segmentDataSourceAttributeValue string) { + mb.metricApachedruidSegmentLoadQueueAssigned.recordDataPoint(mb.startTime, ts, val, segmentServerAttributeValue, segmentDataSourceAttributeValue) +} + +// RecordApachedruidSegmentLoadQueueCancelledDataPoint adds a data point to apachedruid.segment.load_queue.cancelled metric. 
+func (mb *MetricsBuilder) RecordApachedruidSegmentLoadQueueCancelledDataPoint(ts pcommon.Timestamp, val int64, segmentServerAttributeValue string, segmentDataSourceAttributeValue string) { + mb.metricApachedruidSegmentLoadQueueCancelled.recordDataPoint(mb.startTime, ts, val, segmentServerAttributeValue, segmentDataSourceAttributeValue) +} + +// RecordApachedruidSegmentLoadQueueCountDataPoint adds a data point to apachedruid.segment.load_queue.count metric. +func (mb *MetricsBuilder) RecordApachedruidSegmentLoadQueueCountDataPoint(ts pcommon.Timestamp, val int64, segmentServerAttributeValue string) { + mb.metricApachedruidSegmentLoadQueueCount.recordDataPoint(mb.startTime, ts, val, segmentServerAttributeValue) +} + +// RecordApachedruidSegmentLoadQueueFailedDataPoint adds a data point to apachedruid.segment.load_queue.failed metric. +func (mb *MetricsBuilder) RecordApachedruidSegmentLoadQueueFailedDataPoint(ts pcommon.Timestamp, val int64, segmentServerAttributeValue string, segmentDataSourceAttributeValue string) { + mb.metricApachedruidSegmentLoadQueueFailed.recordDataPoint(mb.startTime, ts, val, segmentServerAttributeValue, segmentDataSourceAttributeValue) +} + +// RecordApachedruidSegmentLoadQueueSizeDataPoint adds a data point to apachedruid.segment.load_queue.size metric. +func (mb *MetricsBuilder) RecordApachedruidSegmentLoadQueueSizeDataPoint(ts pcommon.Timestamp, val int64, segmentServerAttributeValue string) { + mb.metricApachedruidSegmentLoadQueueSize.recordDataPoint(mb.startTime, ts, val, segmentServerAttributeValue) +} + +// RecordApachedruidSegmentLoadQueueSuccessDataPoint adds a data point to apachedruid.segment.load_queue.success metric. +func (mb *MetricsBuilder) RecordApachedruidSegmentLoadQueueSuccessDataPoint(ts pcommon.Timestamp, val int64, segmentServerAttributeValue string, segmentDataSourceAttributeValue string) { + mb.metricApachedruidSegmentLoadQueueSuccess.recordDataPoint(mb.startTime, ts, val, segmentServerAttributeValue, segmentDataSourceAttributeValue) +} + +// RecordApachedruidSegmentMaxDataPoint adds a data point to apachedruid.segment.max metric. +func (mb *MetricsBuilder) RecordApachedruidSegmentMaxDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidSegmentMax.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidSegmentMoveSkippedCountDataPoint adds a data point to apachedruid.segment.move_skipped.count metric. +func (mb *MetricsBuilder) RecordApachedruidSegmentMoveSkippedCountDataPoint(ts pcommon.Timestamp, val int64, segmentDescriptionAttributeValue string, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { + mb.metricApachedruidSegmentMoveSkippedCount.recordDataPoint(mb.startTime, ts, val, segmentDescriptionAttributeValue, segmentTierAttributeValue, segmentDataSourceAttributeValue) +} + +// RecordApachedruidSegmentMovedBytesDataPoint adds a data point to apachedruid.segment.moved.bytes metric. 
+func (mb *MetricsBuilder) RecordApachedruidSegmentMovedBytesDataPoint(ts pcommon.Timestamp, val int64, segmentTaskTypeAttributeValue string, segmentDataSourceAttributeValue string, segmentGroupIDAttributeValue string, segmentTagsAttributeValue string, segmentTaskIDAttributeValue string, segmentIntervalAttributeValue string) { + mb.metricApachedruidSegmentMovedBytes.recordDataPoint(mb.startTime, ts, val, segmentTaskTypeAttributeValue, segmentDataSourceAttributeValue, segmentGroupIDAttributeValue, segmentTagsAttributeValue, segmentTaskIDAttributeValue, segmentIntervalAttributeValue) +} + +// RecordApachedruidSegmentMovedCountDataPoint adds a data point to apachedruid.segment.moved.count metric. +func (mb *MetricsBuilder) RecordApachedruidSegmentMovedCountDataPoint(ts pcommon.Timestamp, val int64, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { + mb.metricApachedruidSegmentMovedCount.recordDataPoint(mb.startTime, ts, val, segmentTierAttributeValue, segmentDataSourceAttributeValue) +} + +// RecordApachedruidSegmentNukedBytesDataPoint adds a data point to apachedruid.segment.nuked.bytes metric. +func (mb *MetricsBuilder) RecordApachedruidSegmentNukedBytesDataPoint(ts pcommon.Timestamp, val int64, segmentTaskTypeAttributeValue string, segmentDataSourceAttributeValue string, segmentGroupIDAttributeValue string, segmentTagsAttributeValue string, segmentTaskIDAttributeValue string, segmentIntervalAttributeValue string) { + mb.metricApachedruidSegmentNukedBytes.recordDataPoint(mb.startTime, ts, val, segmentTaskTypeAttributeValue, segmentDataSourceAttributeValue, segmentGroupIDAttributeValue, segmentTagsAttributeValue, segmentTaskIDAttributeValue, segmentIntervalAttributeValue) +} + +// RecordApachedruidSegmentOverShadowedCountDataPoint adds a data point to apachedruid.segment.over_shadowed.count metric. +func (mb *MetricsBuilder) RecordApachedruidSegmentOverShadowedCountDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidSegmentOverShadowedCount.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidSegmentPendingDeleteDataPoint adds a data point to apachedruid.segment.pending_delete metric. +func (mb *MetricsBuilder) RecordApachedruidSegmentPendingDeleteDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidSegmentPendingDelete.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidSegmentRowCountAvgDataPoint adds a data point to apachedruid.segment.row_count.avg metric. +func (mb *MetricsBuilder) RecordApachedruidSegmentRowCountAvgDataPoint(ts pcommon.Timestamp, val int64, segmentPriorityAttributeValue string, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { + mb.metricApachedruidSegmentRowCountAvg.recordDataPoint(mb.startTime, ts, val, segmentPriorityAttributeValue, segmentTierAttributeValue, segmentDataSourceAttributeValue) +} + +// RecordApachedruidSegmentRowCountRangeCountDataPoint adds a data point to apachedruid.segment.row_count.range.count metric. 
+func (mb *MetricsBuilder) RecordApachedruidSegmentRowCountRangeCountDataPoint(ts pcommon.Timestamp, val int64, segmentPriorityAttributeValue string, segmentTierAttributeValue string, segmentDataSourceAttributeValue string, segmentRangeAttributeValue string) { + mb.metricApachedruidSegmentRowCountRangeCount.recordDataPoint(mb.startTime, ts, val, segmentPriorityAttributeValue, segmentTierAttributeValue, segmentDataSourceAttributeValue, segmentRangeAttributeValue) +} + +// RecordApachedruidSegmentScanActiveDataPoint adds a data point to apachedruid.segment.scan.active metric. +func (mb *MetricsBuilder) RecordApachedruidSegmentScanActiveDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidSegmentScanActive.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidSegmentScanPendingDataPoint adds a data point to apachedruid.segment.scan.pending metric. +func (mb *MetricsBuilder) RecordApachedruidSegmentScanPendingDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidSegmentScanPending.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidSegmentSizeDataPoint adds a data point to apachedruid.segment.size metric. +func (mb *MetricsBuilder) RecordApachedruidSegmentSizeDataPoint(ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) { + mb.metricApachedruidSegmentSize.recordDataPoint(mb.startTime, ts, val, segmentDataSourceAttributeValue) +} + +// RecordApachedruidSegmentSkipCompactBytesDataPoint adds a data point to apachedruid.segment.skip_compact.bytes metric. +func (mb *MetricsBuilder) RecordApachedruidSegmentSkipCompactBytesDataPoint(ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) { + mb.metricApachedruidSegmentSkipCompactBytes.recordDataPoint(mb.startTime, ts, val, segmentDataSourceAttributeValue) +} + +// RecordApachedruidSegmentSkipCompactCountDataPoint adds a data point to apachedruid.segment.skip_compact.count metric. +func (mb *MetricsBuilder) RecordApachedruidSegmentSkipCompactCountDataPoint(ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) { + mb.metricApachedruidSegmentSkipCompactCount.recordDataPoint(mb.startTime, ts, val, segmentDataSourceAttributeValue) +} + +// RecordApachedruidSegmentUnavailableCountDataPoint adds a data point to apachedruid.segment.unavailable.count metric. +func (mb *MetricsBuilder) RecordApachedruidSegmentUnavailableCountDataPoint(ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) { + mb.metricApachedruidSegmentUnavailableCount.recordDataPoint(mb.startTime, ts, val, segmentDataSourceAttributeValue) +} + +// RecordApachedruidSegmentUnderReplicatedCountDataPoint adds a data point to apachedruid.segment.under_replicated.count metric. +func (mb *MetricsBuilder) RecordApachedruidSegmentUnderReplicatedCountDataPoint(ts pcommon.Timestamp, val int64, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { + mb.metricApachedruidSegmentUnderReplicatedCount.recordDataPoint(mb.startTime, ts, val, segmentTierAttributeValue, segmentDataSourceAttributeValue) +} + +// RecordApachedruidSegmentUnneededCountDataPoint adds a data point to apachedruid.segment.unneeded.count metric. 
+func (mb *MetricsBuilder) RecordApachedruidSegmentUnneededCountDataPoint(ts pcommon.Timestamp, val int64, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { + mb.metricApachedruidSegmentUnneededCount.recordDataPoint(mb.startTime, ts, val, segmentTierAttributeValue, segmentDataSourceAttributeValue) +} + +// RecordApachedruidSegmentUsedDataPoint adds a data point to apachedruid.segment.used metric. +func (mb *MetricsBuilder) RecordApachedruidSegmentUsedDataPoint(ts pcommon.Timestamp, val int64, segmentPriorityAttributeValue string, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { + mb.metricApachedruidSegmentUsed.recordDataPoint(mb.startTime, ts, val, segmentPriorityAttributeValue, segmentTierAttributeValue, segmentDataSourceAttributeValue) +} + +// RecordApachedruidSegmentUsedPercentDataPoint adds a data point to apachedruid.segment.used_percent metric. +func (mb *MetricsBuilder) RecordApachedruidSegmentUsedPercentDataPoint(ts pcommon.Timestamp, val float64, segmentPriorityAttributeValue string, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { + mb.metricApachedruidSegmentUsedPercent.recordDataPoint(mb.startTime, ts, val, segmentPriorityAttributeValue, segmentTierAttributeValue, segmentDataSourceAttributeValue) +} + +// RecordApachedruidSegmentWaitCompactBytesDataPoint adds a data point to apachedruid.segment.wait_compact.bytes metric. +func (mb *MetricsBuilder) RecordApachedruidSegmentWaitCompactBytesDataPoint(ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) { + mb.metricApachedruidSegmentWaitCompactBytes.recordDataPoint(mb.startTime, ts, val, segmentDataSourceAttributeValue) +} + +// RecordApachedruidSegmentWaitCompactCountDataPoint adds a data point to apachedruid.segment.wait_compact.count metric. +func (mb *MetricsBuilder) RecordApachedruidSegmentWaitCompactCountDataPoint(ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) { + mb.metricApachedruidSegmentWaitCompactCount.recordDataPoint(mb.startTime, ts, val, segmentDataSourceAttributeValue) +} + +// RecordApachedruidServerviewInitTimeDataPoint adds a data point to apachedruid.serverview.init.time metric. +func (mb *MetricsBuilder) RecordApachedruidServerviewInitTimeDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidServerviewInitTime.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidServerviewSyncHealthyDataPoint adds a data point to apachedruid.serverview.sync.healthy metric. +func (mb *MetricsBuilder) RecordApachedruidServerviewSyncHealthyDataPoint(ts pcommon.Timestamp, val int64, serverviewTierAttributeValue string, serverviewServerAttributeValue string) { + mb.metricApachedruidServerviewSyncHealthy.recordDataPoint(mb.startTime, ts, val, serverviewTierAttributeValue, serverviewServerAttributeValue) +} + +// RecordApachedruidServerviewSyncUnstableTimeDataPoint adds a data point to apachedruid.serverview.sync.unstable_time metric. +func (mb *MetricsBuilder) RecordApachedruidServerviewSyncUnstableTimeDataPoint(ts pcommon.Timestamp, val int64, serverviewTierAttributeValue string, serverviewServerAttributeValue string) { + mb.metricApachedruidServerviewSyncUnstableTime.recordDataPoint(mb.startTime, ts, val, serverviewTierAttributeValue, serverviewServerAttributeValue) +} + +// RecordApachedruidSQLQueryBytesDataPoint adds a data point to apachedruid.sql_query.bytes metric. 
+func (mb *MetricsBuilder) RecordApachedruidSQLQueryBytesDataPoint(ts pcommon.Timestamp, val int64, sqlQueryDataSourceAttributeValue string, sqlQueryNativeQueryIdsAttributeValue string, sqlQueryEngineAttributeValue string, sqlQueryRemoteAddressAttributeValue string, sqlQueryIDAttributeValue string, sqlQuerySuccessAttributeValue string) { + mb.metricApachedruidSQLQueryBytes.recordDataPoint(mb.startTime, ts, val, sqlQueryDataSourceAttributeValue, sqlQueryNativeQueryIdsAttributeValue, sqlQueryEngineAttributeValue, sqlQueryRemoteAddressAttributeValue, sqlQueryIDAttributeValue, sqlQuerySuccessAttributeValue) +} + +// RecordApachedruidSQLQueryPlanningTimeMsDataPoint adds a data point to apachedruid.sql_query.planning_time_ms metric. +func (mb *MetricsBuilder) RecordApachedruidSQLQueryPlanningTimeMsDataPoint(ts pcommon.Timestamp, val int64, sqlQueryDataSourceAttributeValue string, sqlQueryNativeQueryIdsAttributeValue string, sqlQueryEngineAttributeValue string, sqlQueryRemoteAddressAttributeValue string, sqlQueryIDAttributeValue string, sqlQuerySuccessAttributeValue string) { + mb.metricApachedruidSQLQueryPlanningTimeMs.recordDataPoint(mb.startTime, ts, val, sqlQueryDataSourceAttributeValue, sqlQueryNativeQueryIdsAttributeValue, sqlQueryEngineAttributeValue, sqlQueryRemoteAddressAttributeValue, sqlQueryIDAttributeValue, sqlQuerySuccessAttributeValue) +} + +// RecordApachedruidSQLQueryTimeDataPoint adds a data point to apachedruid.sql_query.time metric. +func (mb *MetricsBuilder) RecordApachedruidSQLQueryTimeDataPoint(ts pcommon.Timestamp, val int64, sqlQueryDataSourceAttributeValue string, sqlQueryNativeQueryIdsAttributeValue string, sqlQueryEngineAttributeValue string, sqlQueryRemoteAddressAttributeValue string, sqlQueryIDAttributeValue string, sqlQuerySuccessAttributeValue string) { + mb.metricApachedruidSQLQueryTime.recordDataPoint(mb.startTime, ts, val, sqlQueryDataSourceAttributeValue, sqlQueryNativeQueryIdsAttributeValue, sqlQueryEngineAttributeValue, sqlQueryRemoteAddressAttributeValue, sqlQueryIDAttributeValue, sqlQuerySuccessAttributeValue) +} + +// RecordApachedruidSubqueryByteLimitCountDataPoint adds a data point to apachedruid.subquery.byte_limit.count metric. +func (mb *MetricsBuilder) RecordApachedruidSubqueryByteLimitCountDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidSubqueryByteLimitCount.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidSubqueryFallbackCountDataPoint adds a data point to apachedruid.subquery.fallback.count metric. +func (mb *MetricsBuilder) RecordApachedruidSubqueryFallbackCountDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidSubqueryFallbackCount.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidSubqueryFallbackInsufficientTypeCountDataPoint adds a data point to apachedruid.subquery.fallback.insufficient_type.count metric. +func (mb *MetricsBuilder) RecordApachedruidSubqueryFallbackInsufficientTypeCountDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidSubqueryFallbackInsufficientTypeCount.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidSubqueryFallbackUnknownReasonCountDataPoint adds a data point to apachedruid.subquery.fallback.unknown_reason.count metric. 
+func (mb *MetricsBuilder) RecordApachedruidSubqueryFallbackUnknownReasonCountDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidSubqueryFallbackUnknownReasonCount.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidSubqueryRowLimitCountDataPoint adds a data point to apachedruid.subquery.row_limit.count metric. +func (mb *MetricsBuilder) RecordApachedruidSubqueryRowLimitCountDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidSubqueryRowLimitCount.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidSysCPUDataPoint adds a data point to apachedruid.sys.cpu metric. +func (mb *MetricsBuilder) RecordApachedruidSysCPUDataPoint(ts pcommon.Timestamp, val int64, sysCPUTimeAttributeValue string, sysCPUNameAttributeValue string) { + mb.metricApachedruidSysCPU.recordDataPoint(mb.startTime, ts, val, sysCPUTimeAttributeValue, sysCPUNameAttributeValue) +} + +// RecordApachedruidSysDiskQueueDataPoint adds a data point to apachedruid.sys.disk.queue metric. +func (mb *MetricsBuilder) RecordApachedruidSysDiskQueueDataPoint(ts pcommon.Timestamp, val int64, sysDiskNameAttributeValue string) { + mb.metricApachedruidSysDiskQueue.recordDataPoint(mb.startTime, ts, val, sysDiskNameAttributeValue) +} + +// RecordApachedruidSysDiskReadCountDataPoint adds a data point to apachedruid.sys.disk.read.count metric. +func (mb *MetricsBuilder) RecordApachedruidSysDiskReadCountDataPoint(ts pcommon.Timestamp, val int64, sysDiskNameAttributeValue string) { + mb.metricApachedruidSysDiskReadCount.recordDataPoint(mb.startTime, ts, val, sysDiskNameAttributeValue) +} + +// RecordApachedruidSysDiskReadSizeDataPoint adds a data point to apachedruid.sys.disk.read.size metric. +func (mb *MetricsBuilder) RecordApachedruidSysDiskReadSizeDataPoint(ts pcommon.Timestamp, val int64, sysDiskNameAttributeValue string) { + mb.metricApachedruidSysDiskReadSize.recordDataPoint(mb.startTime, ts, val, sysDiskNameAttributeValue) +} + +// RecordApachedruidSysDiskTransferTimeDataPoint adds a data point to apachedruid.sys.disk.transfer_time metric. +func (mb *MetricsBuilder) RecordApachedruidSysDiskTransferTimeDataPoint(ts pcommon.Timestamp, val int64, sysDiskNameAttributeValue string) { + mb.metricApachedruidSysDiskTransferTime.recordDataPoint(mb.startTime, ts, val, sysDiskNameAttributeValue) +} + +// RecordApachedruidSysDiskWriteCountDataPoint adds a data point to apachedruid.sys.disk.write.count metric. +func (mb *MetricsBuilder) RecordApachedruidSysDiskWriteCountDataPoint(ts pcommon.Timestamp, val int64, sysDiskNameAttributeValue string) { + mb.metricApachedruidSysDiskWriteCount.recordDataPoint(mb.startTime, ts, val, sysDiskNameAttributeValue) +} + +// RecordApachedruidSysDiskWriteSizeDataPoint adds a data point to apachedruid.sys.disk.write.size metric. +func (mb *MetricsBuilder) RecordApachedruidSysDiskWriteSizeDataPoint(ts pcommon.Timestamp, val int64, sysDiskNameAttributeValue string) { + mb.metricApachedruidSysDiskWriteSize.recordDataPoint(mb.startTime, ts, val, sysDiskNameAttributeValue) +} + +// RecordApachedruidSysFsFilesCountDataPoint adds a data point to apachedruid.sys.fs.files.count metric. 
+func (mb *MetricsBuilder) RecordApachedruidSysFsFilesCountDataPoint(ts pcommon.Timestamp, val int64, sysFsDirNameAttributeValue string, sysFsDevNameAttributeValue string) { + mb.metricApachedruidSysFsFilesCount.recordDataPoint(mb.startTime, ts, val, sysFsDirNameAttributeValue, sysFsDevNameAttributeValue) +} + +// RecordApachedruidSysFsFilesFreeDataPoint adds a data point to apachedruid.sys.fs.files.free metric. +func (mb *MetricsBuilder) RecordApachedruidSysFsFilesFreeDataPoint(ts pcommon.Timestamp, val int64, sysFsDirNameAttributeValue string, sysFsDevNameAttributeValue string) { + mb.metricApachedruidSysFsFilesFree.recordDataPoint(mb.startTime, ts, val, sysFsDirNameAttributeValue, sysFsDevNameAttributeValue) +} + +// RecordApachedruidSysFsMaxDataPoint adds a data point to apachedruid.sys.fs.max metric. +func (mb *MetricsBuilder) RecordApachedruidSysFsMaxDataPoint(ts pcommon.Timestamp, val int64, sysFsDirNameAttributeValue string, sysFsDevNameAttributeValue string) { + mb.metricApachedruidSysFsMax.recordDataPoint(mb.startTime, ts, val, sysFsDirNameAttributeValue, sysFsDevNameAttributeValue) +} + +// RecordApachedruidSysFsUsedDataPoint adds a data point to apachedruid.sys.fs.used metric. +func (mb *MetricsBuilder) RecordApachedruidSysFsUsedDataPoint(ts pcommon.Timestamp, val int64, sysFsDirNameAttributeValue string, sysFsDevNameAttributeValue string) { + mb.metricApachedruidSysFsUsed.recordDataPoint(mb.startTime, ts, val, sysFsDirNameAttributeValue, sysFsDevNameAttributeValue) +} + +// RecordApachedruidSysLa1DataPoint adds a data point to apachedruid.sys.la.1 metric. +func (mb *MetricsBuilder) RecordApachedruidSysLa1DataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidSysLa1.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidSysLa15DataPoint adds a data point to apachedruid.sys.la.15 metric. +func (mb *MetricsBuilder) RecordApachedruidSysLa15DataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidSysLa15.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidSysLa5DataPoint adds a data point to apachedruid.sys.la.5 metric. +func (mb *MetricsBuilder) RecordApachedruidSysLa5DataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidSysLa5.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidSysMemFreeDataPoint adds a data point to apachedruid.sys.mem.free metric. +func (mb *MetricsBuilder) RecordApachedruidSysMemFreeDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidSysMemFree.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidSysMemMaxDataPoint adds a data point to apachedruid.sys.mem.max metric. +func (mb *MetricsBuilder) RecordApachedruidSysMemMaxDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidSysMemMax.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidSysMemUsedDataPoint adds a data point to apachedruid.sys.mem.used metric. +func (mb *MetricsBuilder) RecordApachedruidSysMemUsedDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidSysMemUsed.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidSysNetReadDroppedDataPoint adds a data point to apachedruid.sys.net.read.dropped metric. 
+func (mb *MetricsBuilder) RecordApachedruidSysNetReadDroppedDataPoint(ts pcommon.Timestamp, val int64, sysNetHwaddrAttributeValue string, sysNetNameAttributeValue string, sysNetAddressAttributeValue string) { + mb.metricApachedruidSysNetReadDropped.recordDataPoint(mb.startTime, ts, val, sysNetHwaddrAttributeValue, sysNetNameAttributeValue, sysNetAddressAttributeValue) +} + +// RecordApachedruidSysNetReadErrorsDataPoint adds a data point to apachedruid.sys.net.read.errors metric. +func (mb *MetricsBuilder) RecordApachedruidSysNetReadErrorsDataPoint(ts pcommon.Timestamp, val int64, sysNetHwaddrAttributeValue string, sysNetNameAttributeValue string, sysNetAddressAttributeValue string) { + mb.metricApachedruidSysNetReadErrors.recordDataPoint(mb.startTime, ts, val, sysNetHwaddrAttributeValue, sysNetNameAttributeValue, sysNetAddressAttributeValue) +} + +// RecordApachedruidSysNetReadPacketsDataPoint adds a data point to apachedruid.sys.net.read.packets metric. +func (mb *MetricsBuilder) RecordApachedruidSysNetReadPacketsDataPoint(ts pcommon.Timestamp, val int64, sysNetHwaddrAttributeValue string, sysNetNameAttributeValue string, sysNetAddressAttributeValue string) { + mb.metricApachedruidSysNetReadPackets.recordDataPoint(mb.startTime, ts, val, sysNetHwaddrAttributeValue, sysNetNameAttributeValue, sysNetAddressAttributeValue) +} + +// RecordApachedruidSysNetReadSizeDataPoint adds a data point to apachedruid.sys.net.read.size metric. +func (mb *MetricsBuilder) RecordApachedruidSysNetReadSizeDataPoint(ts pcommon.Timestamp, val int64, sysNetHwaddrAttributeValue string, sysNetNameAttributeValue string, sysNetAddressAttributeValue string) { + mb.metricApachedruidSysNetReadSize.recordDataPoint(mb.startTime, ts, val, sysNetHwaddrAttributeValue, sysNetNameAttributeValue, sysNetAddressAttributeValue) +} + +// RecordApachedruidSysNetWriteCollisionsDataPoint adds a data point to apachedruid.sys.net.write.collisions metric. +func (mb *MetricsBuilder) RecordApachedruidSysNetWriteCollisionsDataPoint(ts pcommon.Timestamp, val int64, sysNetHwaddrAttributeValue string, sysNetNameAttributeValue string, sysNetAddressAttributeValue string) { + mb.metricApachedruidSysNetWriteCollisions.recordDataPoint(mb.startTime, ts, val, sysNetHwaddrAttributeValue, sysNetNameAttributeValue, sysNetAddressAttributeValue) +} + +// RecordApachedruidSysNetWriteErrorsDataPoint adds a data point to apachedruid.sys.net.write.errors metric. +func (mb *MetricsBuilder) RecordApachedruidSysNetWriteErrorsDataPoint(ts pcommon.Timestamp, val int64, sysNetHwaddrAttributeValue string, sysNetNameAttributeValue string, sysNetAddressAttributeValue string) { + mb.metricApachedruidSysNetWriteErrors.recordDataPoint(mb.startTime, ts, val, sysNetHwaddrAttributeValue, sysNetNameAttributeValue, sysNetAddressAttributeValue) +} + +// RecordApachedruidSysNetWritePacketsDataPoint adds a data point to apachedruid.sys.net.write.packets metric. +func (mb *MetricsBuilder) RecordApachedruidSysNetWritePacketsDataPoint(ts pcommon.Timestamp, val int64, sysNetHwaddrAttributeValue string, sysNetNameAttributeValue string, sysNetAddressAttributeValue string) { + mb.metricApachedruidSysNetWritePackets.recordDataPoint(mb.startTime, ts, val, sysNetHwaddrAttributeValue, sysNetNameAttributeValue, sysNetAddressAttributeValue) +} + +// RecordApachedruidSysNetWriteSizeDataPoint adds a data point to apachedruid.sys.net.write.size metric. 
+func (mb *MetricsBuilder) RecordApachedruidSysNetWriteSizeDataPoint(ts pcommon.Timestamp, val int64, sysNetHwaddrAttributeValue string, sysNetNameAttributeValue string, sysNetAddressAttributeValue string) { + mb.metricApachedruidSysNetWriteSize.recordDataPoint(mb.startTime, ts, val, sysNetHwaddrAttributeValue, sysNetNameAttributeValue, sysNetAddressAttributeValue) +} + +// RecordApachedruidSysStorageUsedDataPoint adds a data point to apachedruid.sys.storage.used metric. +func (mb *MetricsBuilder) RecordApachedruidSysStorageUsedDataPoint(ts pcommon.Timestamp, val int64, sysFsDirNameAttributeValue string) { + mb.metricApachedruidSysStorageUsed.recordDataPoint(mb.startTime, ts, val, sysFsDirNameAttributeValue) +} + +// RecordApachedruidSysSwapFreeDataPoint adds a data point to apachedruid.sys.swap.free metric. +func (mb *MetricsBuilder) RecordApachedruidSysSwapFreeDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidSysSwapFree.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidSysSwapMaxDataPoint adds a data point to apachedruid.sys.swap.max metric. +func (mb *MetricsBuilder) RecordApachedruidSysSwapMaxDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidSysSwapMax.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidSysSwapPageInDataPoint adds a data point to apachedruid.sys.swap.page_in metric. +func (mb *MetricsBuilder) RecordApachedruidSysSwapPageInDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidSysSwapPageIn.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidSysSwapPageOutDataPoint adds a data point to apachedruid.sys.swap.page_out metric. +func (mb *MetricsBuilder) RecordApachedruidSysSwapPageOutDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidSysSwapPageOut.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidSysTcpv4ActiveOpensDataPoint adds a data point to apachedruid.sys.tcpv4.active_opens metric. +func (mb *MetricsBuilder) RecordApachedruidSysTcpv4ActiveOpensDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidSysTcpv4ActiveOpens.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidSysTcpv4AttemptFailsDataPoint adds a data point to apachedruid.sys.tcpv4.attempt_fails metric. +func (mb *MetricsBuilder) RecordApachedruidSysTcpv4AttemptFailsDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidSysTcpv4AttemptFails.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidSysTcpv4EstabResetsDataPoint adds a data point to apachedruid.sys.tcpv4.estab_resets metric. +func (mb *MetricsBuilder) RecordApachedruidSysTcpv4EstabResetsDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidSysTcpv4EstabResets.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidSysTcpv4InErrsDataPoint adds a data point to apachedruid.sys.tcpv4.in.errs metric. +func (mb *MetricsBuilder) RecordApachedruidSysTcpv4InErrsDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidSysTcpv4InErrs.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidSysTcpv4InSegsDataPoint adds a data point to apachedruid.sys.tcpv4.in.segs metric. +func (mb *MetricsBuilder) RecordApachedruidSysTcpv4InSegsDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidSysTcpv4InSegs.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidSysTcpv4OutRstsDataPoint adds a data point to apachedruid.sys.tcpv4.out.rsts metric. 
+func (mb *MetricsBuilder) RecordApachedruidSysTcpv4OutRstsDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidSysTcpv4OutRsts.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidSysTcpv4OutSegsDataPoint adds a data point to apachedruid.sys.tcpv4.out.segs metric. +func (mb *MetricsBuilder) RecordApachedruidSysTcpv4OutSegsDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidSysTcpv4OutSegs.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidSysTcpv4PassiveOpensDataPoint adds a data point to apachedruid.sys.tcpv4.passive_opens metric. +func (mb *MetricsBuilder) RecordApachedruidSysTcpv4PassiveOpensDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidSysTcpv4PassiveOpens.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidSysTcpv4RetransSegsDataPoint adds a data point to apachedruid.sys.tcpv4.retrans.segs metric. +func (mb *MetricsBuilder) RecordApachedruidSysTcpv4RetransSegsDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidSysTcpv4RetransSegs.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidSysUptimeDataPoint adds a data point to apachedruid.sys.uptime metric. +func (mb *MetricsBuilder) RecordApachedruidSysUptimeDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidSysUptime.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidTaskActionBatchAttemptsDataPoint adds a data point to apachedruid.task.action.batch.attempts metric. +func (mb *MetricsBuilder) RecordApachedruidTaskActionBatchAttemptsDataPoint(ts pcommon.Timestamp, val int64, taskIntervalAttributeValue string, taskDataSourceAttributeValue string, taskActionTypeAttributeValue string) { + mb.metricApachedruidTaskActionBatchAttempts.recordDataPoint(mb.startTime, ts, val, taskIntervalAttributeValue, taskDataSourceAttributeValue, taskActionTypeAttributeValue) +} + +// RecordApachedruidTaskActionBatchQueueTimeDataPoint adds a data point to apachedruid.task.action.batch.queue_time metric. +func (mb *MetricsBuilder) RecordApachedruidTaskActionBatchQueueTimeDataPoint(ts pcommon.Timestamp, val int64, taskIntervalAttributeValue string, taskDataSourceAttributeValue string, taskActionTypeAttributeValue string) { + mb.metricApachedruidTaskActionBatchQueueTime.recordDataPoint(mb.startTime, ts, val, taskIntervalAttributeValue, taskDataSourceAttributeValue, taskActionTypeAttributeValue) +} + +// RecordApachedruidTaskActionBatchRunTimeDataPoint adds a data point to apachedruid.task.action.batch.run_time metric. +func (mb *MetricsBuilder) RecordApachedruidTaskActionBatchRunTimeDataPoint(ts pcommon.Timestamp, val int64, taskIntervalAttributeValue string, taskDataSourceAttributeValue string, taskActionTypeAttributeValue string) { + mb.metricApachedruidTaskActionBatchRunTime.recordDataPoint(mb.startTime, ts, val, taskIntervalAttributeValue, taskDataSourceAttributeValue, taskActionTypeAttributeValue) +} + +// RecordApachedruidTaskActionBatchSizeDataPoint adds a data point to apachedruid.task.action.batch.size metric. +func (mb *MetricsBuilder) RecordApachedruidTaskActionBatchSizeDataPoint(ts pcommon.Timestamp, val int64, taskIntervalAttributeValue string, taskDataSourceAttributeValue string, taskActionTypeAttributeValue string) { + mb.metricApachedruidTaskActionBatchSize.recordDataPoint(mb.startTime, ts, val, taskIntervalAttributeValue, taskDataSourceAttributeValue, taskActionTypeAttributeValue) +} + +// RecordApachedruidTaskActionFailedCountDataPoint adds a data point to apachedruid.task.action.failed.count metric. 
+func (mb *MetricsBuilder) RecordApachedruidTaskActionFailedCountDataPoint(ts pcommon.Timestamp, val int64, taskTypeAttributeValue string, taskDataSourceAttributeValue string, taskActionTypeAttributeValue string, taskGroupIDAttributeValue string, taskTagsAttributeValue string, taskIDAttributeValue string) { + mb.metricApachedruidTaskActionFailedCount.recordDataPoint(mb.startTime, ts, val, taskTypeAttributeValue, taskDataSourceAttributeValue, taskActionTypeAttributeValue, taskGroupIDAttributeValue, taskTagsAttributeValue, taskIDAttributeValue) +} + +// RecordApachedruidTaskActionLogTimeDataPoint adds a data point to apachedruid.task.action.log.time metric. +func (mb *MetricsBuilder) RecordApachedruidTaskActionLogTimeDataPoint(ts pcommon.Timestamp, val int64, taskTypeAttributeValue string, taskDataSourceAttributeValue string, taskActionTypeAttributeValue string, taskGroupIDAttributeValue string, taskTagsAttributeValue string, taskIDAttributeValue string) { + mb.metricApachedruidTaskActionLogTime.recordDataPoint(mb.startTime, ts, val, taskTypeAttributeValue, taskDataSourceAttributeValue, taskActionTypeAttributeValue, taskGroupIDAttributeValue, taskTagsAttributeValue, taskIDAttributeValue) +} + +// RecordApachedruidTaskActionRunTimeDataPoint adds a data point to apachedruid.task.action.run.time metric. +func (mb *MetricsBuilder) RecordApachedruidTaskActionRunTimeDataPoint(ts pcommon.Timestamp, val int64, taskTypeAttributeValue string, taskDataSourceAttributeValue string, taskActionTypeAttributeValue string, taskGroupIDAttributeValue string, taskTagsAttributeValue string, taskIDAttributeValue string) { + mb.metricApachedruidTaskActionRunTime.recordDataPoint(mb.startTime, ts, val, taskTypeAttributeValue, taskDataSourceAttributeValue, taskActionTypeAttributeValue, taskGroupIDAttributeValue, taskTagsAttributeValue, taskIDAttributeValue) +} + +// RecordApachedruidTaskActionSuccessCountDataPoint adds a data point to apachedruid.task.action.success.count metric. +func (mb *MetricsBuilder) RecordApachedruidTaskActionSuccessCountDataPoint(ts pcommon.Timestamp, val int64, taskTypeAttributeValue string, taskDataSourceAttributeValue string, taskActionTypeAttributeValue string, taskGroupIDAttributeValue string, taskTagsAttributeValue string, taskIDAttributeValue string) { + mb.metricApachedruidTaskActionSuccessCount.recordDataPoint(mb.startTime, ts, val, taskTypeAttributeValue, taskDataSourceAttributeValue, taskActionTypeAttributeValue, taskGroupIDAttributeValue, taskTagsAttributeValue, taskIDAttributeValue) +} + +// RecordApachedruidTaskFailedCountDataPoint adds a data point to apachedruid.task.failed.count metric. +func (mb *MetricsBuilder) RecordApachedruidTaskFailedCountDataPoint(ts pcommon.Timestamp, val int64, taskDataSourceAttributeValue string) { + mb.metricApachedruidTaskFailedCount.recordDataPoint(mb.startTime, ts, val, taskDataSourceAttributeValue) +} + +// RecordApachedruidTaskPendingCountDataPoint adds a data point to apachedruid.task.pending.count metric. +func (mb *MetricsBuilder) RecordApachedruidTaskPendingCountDataPoint(ts pcommon.Timestamp, val int64, taskDataSourceAttributeValue string) { + mb.metricApachedruidTaskPendingCount.recordDataPoint(mb.startTime, ts, val, taskDataSourceAttributeValue) +} + +// RecordApachedruidTaskPendingTimeDataPoint adds a data point to apachedruid.task.pending.time metric. 
+func (mb *MetricsBuilder) RecordApachedruidTaskPendingTimeDataPoint(ts pcommon.Timestamp, val int64, taskTypeAttributeValue string, taskDataSourceAttributeValue string, taskGroupIDAttributeValue string, taskTagsAttributeValue string, taskIDAttributeValue string) { + mb.metricApachedruidTaskPendingTime.recordDataPoint(mb.startTime, ts, val, taskTypeAttributeValue, taskDataSourceAttributeValue, taskGroupIDAttributeValue, taskTagsAttributeValue, taskIDAttributeValue) +} + +// RecordApachedruidTaskRunTimeDataPoint adds a data point to apachedruid.task.run.time metric. +func (mb *MetricsBuilder) RecordApachedruidTaskRunTimeDataPoint(ts pcommon.Timestamp, val int64, taskTypeAttributeValue string, taskDataSourceAttributeValue string, taskGroupIDAttributeValue string, taskStatusAttributeValue string, taskTagsAttributeValue string, taskIDAttributeValue string) { + mb.metricApachedruidTaskRunTime.recordDataPoint(mb.startTime, ts, val, taskTypeAttributeValue, taskDataSourceAttributeValue, taskGroupIDAttributeValue, taskStatusAttributeValue, taskTagsAttributeValue, taskIDAttributeValue) +} + +// RecordApachedruidTaskRunningCountDataPoint adds a data point to apachedruid.task.running.count metric. +func (mb *MetricsBuilder) RecordApachedruidTaskRunningCountDataPoint(ts pcommon.Timestamp, val int64, taskDataSourceAttributeValue string) { + mb.metricApachedruidTaskRunningCount.recordDataPoint(mb.startTime, ts, val, taskDataSourceAttributeValue) +} + +// RecordApachedruidTaskSegmentAvailabilityWaitTimeDataPoint adds a data point to apachedruid.task.segment_availability.wait.time metric. +func (mb *MetricsBuilder) RecordApachedruidTaskSegmentAvailabilityWaitTimeDataPoint(ts pcommon.Timestamp, val int64, taskTypeAttributeValue string, taskDataSourceAttributeValue string, taskGroupIDAttributeValue string, taskSegmentAvailabilityConfirmedAttributeValue string, taskTagsAttributeValue string, taskIDAttributeValue string) { + mb.metricApachedruidTaskSegmentAvailabilityWaitTime.recordDataPoint(mb.startTime, ts, val, taskTypeAttributeValue, taskDataSourceAttributeValue, taskGroupIDAttributeValue, taskSegmentAvailabilityConfirmedAttributeValue, taskTagsAttributeValue, taskIDAttributeValue) +} + +// RecordApachedruidTaskSuccessCountDataPoint adds a data point to apachedruid.task.success.count metric. +func (mb *MetricsBuilder) RecordApachedruidTaskSuccessCountDataPoint(ts pcommon.Timestamp, val int64, taskDataSourceAttributeValue string) { + mb.metricApachedruidTaskSuccessCount.recordDataPoint(mb.startTime, ts, val, taskDataSourceAttributeValue) +} + +// RecordApachedruidTaskWaitingCountDataPoint adds a data point to apachedruid.task.waiting.count metric. +func (mb *MetricsBuilder) RecordApachedruidTaskWaitingCountDataPoint(ts pcommon.Timestamp, val int64, taskDataSourceAttributeValue string) { + mb.metricApachedruidTaskWaitingCount.recordDataPoint(mb.startTime, ts, val, taskDataSourceAttributeValue) +} + +// RecordApachedruidTaskSlotBlacklistedCountDataPoint adds a data point to apachedruid.task_slot.blacklisted.count metric. +func (mb *MetricsBuilder) RecordApachedruidTaskSlotBlacklistedCountDataPoint(ts pcommon.Timestamp, val int64, taskSlotCategoryAttributeValue string) { + mb.metricApachedruidTaskSlotBlacklistedCount.recordDataPoint(mb.startTime, ts, val, taskSlotCategoryAttributeValue) +} + +// RecordApachedruidTaskSlotIdleCountDataPoint adds a data point to apachedruid.task_slot.idle.count metric. 
+func (mb *MetricsBuilder) RecordApachedruidTaskSlotIdleCountDataPoint(ts pcommon.Timestamp, val int64, taskSlotCategoryAttributeValue string) { + mb.metricApachedruidTaskSlotIdleCount.recordDataPoint(mb.startTime, ts, val, taskSlotCategoryAttributeValue) +} + +// RecordApachedruidTaskSlotLazyCountDataPoint adds a data point to apachedruid.task_slot.lazy.count metric. +func (mb *MetricsBuilder) RecordApachedruidTaskSlotLazyCountDataPoint(ts pcommon.Timestamp, val int64, taskSlotCategoryAttributeValue string) { + mb.metricApachedruidTaskSlotLazyCount.recordDataPoint(mb.startTime, ts, val, taskSlotCategoryAttributeValue) +} + +// RecordApachedruidTaskSlotTotalCountDataPoint adds a data point to apachedruid.task_slot.total.count metric. +func (mb *MetricsBuilder) RecordApachedruidTaskSlotTotalCountDataPoint(ts pcommon.Timestamp, val int64, taskSlotCategoryAttributeValue string) { + mb.metricApachedruidTaskSlotTotalCount.recordDataPoint(mb.startTime, ts, val, taskSlotCategoryAttributeValue) +} + +// RecordApachedruidTaskSlotUsedCountDataPoint adds a data point to apachedruid.task_slot.used.count metric. +func (mb *MetricsBuilder) RecordApachedruidTaskSlotUsedCountDataPoint(ts pcommon.Timestamp, val int64, taskSlotCategoryAttributeValue string) { + mb.metricApachedruidTaskSlotUsedCount.recordDataPoint(mb.startTime, ts, val, taskSlotCategoryAttributeValue) +} + +// RecordApachedruidTierHistoricalCountDataPoint adds a data point to apachedruid.tier.historical.count metric. +func (mb *MetricsBuilder) RecordApachedruidTierHistoricalCountDataPoint(ts pcommon.Timestamp, val int64, tierAttributeValue string) { + mb.metricApachedruidTierHistoricalCount.recordDataPoint(mb.startTime, ts, val, tierAttributeValue) +} + +// RecordApachedruidTierReplicationFactorDataPoint adds a data point to apachedruid.tier.replication.factor metric. +func (mb *MetricsBuilder) RecordApachedruidTierReplicationFactorDataPoint(ts pcommon.Timestamp, val int64, tierAttributeValue string) { + mb.metricApachedruidTierReplicationFactor.recordDataPoint(mb.startTime, ts, val, tierAttributeValue) +} + +// RecordApachedruidTierRequiredCapacityDataPoint adds a data point to apachedruid.tier.required.capacity metric. +func (mb *MetricsBuilder) RecordApachedruidTierRequiredCapacityDataPoint(ts pcommon.Timestamp, val int64, tierAttributeValue string) { + mb.metricApachedruidTierRequiredCapacity.recordDataPoint(mb.startTime, ts, val, tierAttributeValue) +} + +// RecordApachedruidTierTotalCapacityDataPoint adds a data point to apachedruid.tier.total.capacity metric. +func (mb *MetricsBuilder) RecordApachedruidTierTotalCapacityDataPoint(ts pcommon.Timestamp, val int64, tierAttributeValue string) { + mb.metricApachedruidTierTotalCapacity.recordDataPoint(mb.startTime, ts, val, tierAttributeValue) +} + +// RecordApachedruidWorkerTaskFailedCountDataPoint adds a data point to apachedruid.worker.task.failed.count metric. +func (mb *MetricsBuilder) RecordApachedruidWorkerTaskFailedCountDataPoint(ts pcommon.Timestamp, val int64, workerCategoryAttributeValue string, workerVersionAttributeValue string) { + mb.metricApachedruidWorkerTaskFailedCount.recordDataPoint(mb.startTime, ts, val, workerCategoryAttributeValue, workerVersionAttributeValue) +} + +// RecordApachedruidWorkerTaskSuccessCountDataPoint adds a data point to apachedruid.worker.task.success.count metric. 
+func (mb *MetricsBuilder) RecordApachedruidWorkerTaskSuccessCountDataPoint(ts pcommon.Timestamp, val int64, workerCategoryAttributeValue string, workerVersionAttributeValue string) { + mb.metricApachedruidWorkerTaskSuccessCount.recordDataPoint(mb.startTime, ts, val, workerCategoryAttributeValue, workerVersionAttributeValue) +} + +// RecordApachedruidWorkerTaskSlotIdleCountDataPoint adds a data point to apachedruid.worker.task_slot.idle.count metric. +func (mb *MetricsBuilder) RecordApachedruidWorkerTaskSlotIdleCountDataPoint(ts pcommon.Timestamp, val int64, workerCategoryAttributeValue string, workerVersionAttributeValue string) { + mb.metricApachedruidWorkerTaskSlotIdleCount.recordDataPoint(mb.startTime, ts, val, workerCategoryAttributeValue, workerVersionAttributeValue) +} + +// RecordApachedruidWorkerTaskSlotTotalCountDataPoint adds a data point to apachedruid.worker.task_slot.total.count metric. +func (mb *MetricsBuilder) RecordApachedruidWorkerTaskSlotTotalCountDataPoint(ts pcommon.Timestamp, val int64, workerCategoryAttributeValue string, workerVersionAttributeValue string) { + mb.metricApachedruidWorkerTaskSlotTotalCount.recordDataPoint(mb.startTime, ts, val, workerCategoryAttributeValue, workerVersionAttributeValue) +} + +// RecordApachedruidWorkerTaskSlotUsedCountDataPoint adds a data point to apachedruid.worker.task_slot.used.count metric. +func (mb *MetricsBuilder) RecordApachedruidWorkerTaskSlotUsedCountDataPoint(ts pcommon.Timestamp, val int64, workerCategoryAttributeValue string, workerVersionAttributeValue string) { + mb.metricApachedruidWorkerTaskSlotUsedCount.recordDataPoint(mb.startTime, ts, val, workerCategoryAttributeValue, workerVersionAttributeValue) +} + +// RecordApachedruidZkConnectedDataPoint adds a data point to apachedruid.zk.connected metric. +func (mb *MetricsBuilder) RecordApachedruidZkConnectedDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidZkConnected.recordDataPoint(mb.startTime, ts, val) +} + +// RecordApachedruidZkReconnectTimeDataPoint adds a data point to apachedruid.zk.reconnect.time metric. +func (mb *MetricsBuilder) RecordApachedruidZkReconnectTimeDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricApachedruidZkReconnectTime.recordDataPoint(mb.startTime, ts, val) +} + +// Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, +// and metrics builder should update its startTime and reset its internal state accordingly. +func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { + mb.startTime = pcommon.NewTimestampFromTime(time.Now()) + for _, op := range options { + op(mb) + } +} diff --git a/receiver/apachedruidreceiver/internal/metadata/generated_metrics_test.go b/receiver/apachedruidreceiver/internal/metadata/generated_metrics_test.go new file mode 100644 index 0000000000000..35b53657c8667 --- /dev/null +++ b/receiver/apachedruidreceiver/internal/metadata/generated_metrics_test.go @@ -0,0 +1,5541 @@ +// Code generated by mdatagen. DO NOT EDIT. 
+ +package metadata + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/receiver/receivertest" + "go.uber.org/zap" + "go.uber.org/zap/zaptest/observer" +) + +type testConfigCollection int + +const ( + testSetDefault testConfigCollection = iota + testSetAll + testSetNone +) + +func TestMetricsBuilder(t *testing.T) { + tests := []struct { + name string + configSet testConfigCollection + }{ + { + name: "default", + configSet: testSetDefault, + }, + { + name: "all_set", + configSet: testSetAll, + }, + { + name: "none_set", + configSet: testSetNone, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + start := pcommon.Timestamp(1_000_000_000) + ts := pcommon.Timestamp(1_000_001_000) + observedZapCore, observedLogs := observer.New(zap.WarnLevel) + settings := receivertest.NewNopCreateSettings() + settings.Logger = zap.New(observedZapCore) + mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, test.name), settings, WithStartTime(start)) + + expectedWarnings := 0 + + assert.Equal(t, expectedWarnings, observedLogs.Len()) + + defaultMetricsCount := 0 + allMetricsCount := 0 + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidCompactSegmentAnalyzerFetchAndProcessMillisDataPoint(ts, 1, "compact_task_type-val", "compact_data_source-val", "compact_group_id-val", "compact_tags-val", "compact_task_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidCompactTaskCountDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidCompactTaskAvailableSlotCountDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidCompactTaskMaxSlotCountDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidCoordinatorGlobalTimeDataPoint(ts, 1, "coordinator_duty_group-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidCoordinatorTimeDataPoint(ts, 1, "coordinator_duty-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestBytesReceivedDataPoint(ts, 1, "ingest_task_type-val", "ingest_task_id-val", "ingest_data_source-val", "ingest_service_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestCountDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val", "ingest_task_ingestion_mode-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestEventsBufferedDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_service_name-val", "ingest_buffer_capacity-val", "ingest_task_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestEventsDuplicateDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestEventsMessageGapDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestEventsProcessedDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestEventsProcessedWithErrorDataPoint(ts, 1, 
"ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestEventsThrownAwayDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestEventsUnparseableDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestHandoffCountDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestHandoffFailedDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestHandoffTimeDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestInputBytesDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestKafkaAvgLagDataPoint(ts, 1, "ingest_tags-val", "ingest_stream-val", "ingest_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestKafkaLagDataPoint(ts, 1, "ingest_tags-val", "ingest_stream-val", "ingest_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestKafkaMaxLagDataPoint(ts, 1, "ingest_tags-val", "ingest_stream-val", "ingest_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestKafkaPartitionLagDataPoint(ts, 1, "ingest_tags-val", "ingest_partition-val", "ingest_stream-val", "ingest_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestKinesisAvgLagTimeDataPoint(ts, 1, "ingest_tags-val", "ingest_stream-val", "ingest_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestKinesisLagTimeDataPoint(ts, 1, "ingest_tags-val", "ingest_stream-val", "ingest_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestKinesisMaxLagTimeDataPoint(ts, 1, "ingest_tags-val", "ingest_stream-val", "ingest_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestKinesisPartitionLagTimeDataPoint(ts, 1, "ingest_tags-val", "ingest_partition-val", "ingest_stream-val", "ingest_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestMergeCPUDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestMergeTimeDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestNoticesQueueSizeDataPoint(ts, 1, "ingest_tags-val", "ingest_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestNoticesTimeDataPoint(ts, 1, "ingest_tags-val", 
"ingest_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestPauseTimeDataPoint(ts, 1, "ingest_tags-val", "ingest_task_id-val", "ingest_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestPersistsBackPressureDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestPersistsCountDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestPersistsCPUDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestPersistsFailedDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestPersistsTimeDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestRowsOutputDataPoint(ts, 1, "ingest_task_type-val", "ingest_task_id-val", "ingest_data_source-val", "ingest_group_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestSegmentsCountDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val", "ingest_task_ingestion_mode-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestShuffleBytesDataPoint(ts, 1, "ingest_supervisor_task_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestShuffleRequestsDataPoint(ts, 1, "ingest_supervisor_task_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestSinkCountDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIngestTombstonesCountDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val", "ingest_task_ingestion_mode-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIntervalCompactedCountDataPoint(ts, 1, "interval_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIntervalSkipCompactCountDataPoint(ts, 1, "interval_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidIntervalWaitCompactCountDataPoint(ts, 1, "interval_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidJettyNumOpenConnectionsDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidJettyThreadPoolBusyDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidJettyThreadPoolIdleDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidJettyThreadPoolIsLowOnThreadsDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidJettyThreadPoolMaxDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidJettyThreadPoolMinDataPoint(ts, 1) + + defaultMetricsCount++ + 
allMetricsCount++ + mb.RecordApachedruidJettyThreadPoolQueueSizeDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidJettyThreadPoolTotalDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidJvmBufferpoolCapacityDataPoint(ts, 1, "jvm_bufferpool_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidJvmBufferpoolCountDataPoint(ts, 1, "jvm_bufferpool_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidJvmBufferpoolUsedDataPoint(ts, 1, "jvm_bufferpool_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidJvmGcCountDataPoint(ts, 1, "jvm_gc_gen-val", "jvm_gc_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidJvmGcCPUDataPoint(ts, 1, "jvm_gc_gen-val", "jvm_gc_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidJvmMemCommittedDataPoint(ts, 1, "jvm_mem_kind-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidJvmMemInitDataPoint(ts, 1, "jvm_mem_kind-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidJvmMemMaxDataPoint(ts, 1, "jvm_mem_kind-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidJvmMemUsedDataPoint(ts, 1, "jvm_mem_kind-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidJvmPoolCommittedDataPoint(ts, 1, "jvm_pool_name-val", "jvm_pool_kind-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidJvmPoolInitDataPoint(ts, 1, "jvm_pool_name-val", "jvm_pool_kind-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidJvmPoolMaxDataPoint(ts, 1, "jvm_pool_name-val", "jvm_pool_kind-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidJvmPoolUsedDataPoint(ts, 1, "jvm_pool_name-val", "jvm_pool_kind-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidKillPendingSegmentsCountDataPoint(ts, 1, "kill_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidKillTaskCountDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidKillTaskAvailableSlotCountDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidKillTaskMaxSlotCountDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidMergeBufferPendingRequestsDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidMetadataKillAuditCountDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidMetadataKillCompactionCountDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidMetadataKillDatasourceCountDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidMetadataKillRuleCountDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidMetadataKillSupervisorCountDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidMetadatacacheInitTimeDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidMetadatacacheRefreshCountDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidMetadatacacheRefreshTimeDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryByteLimitExceededCountDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryBytesDataPoint(ts, 1, "query_data_source-val", "query_num_metrics-val", 
"query_dimension-val", "query_has_filters-val", 15, 25, "query_type-val", "query_remote_address-val", "query_id-val", "query_context-val", "query_num_dimensions-val", "query_interval-val", "query_duration-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryCacheDeltaAverageBytesDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryCacheDeltaErrorsDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryCacheDeltaEvictionsDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryCacheDeltaHitRateDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryCacheDeltaHitsDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryCacheDeltaMissesDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryCacheDeltaNumEntriesDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryCacheDeltaPutErrorDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryCacheDeltaPutOkDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryCacheDeltaPutOversizedDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryCacheDeltaSizeBytesDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryCacheDeltaTimeoutsDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryCacheMemcachedDeltaDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryCacheMemcachedTotalDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryCacheTotalAverageBytesDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryCacheTotalErrorsDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryCacheTotalEvictionsDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryCacheTotalHitRateDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryCacheTotalHitsDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryCacheTotalMissesDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryCacheTotalNumEntriesDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryCacheTotalPutErrorDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryCacheTotalPutOkDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryCacheTotalPutOversizedDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryCacheTotalSizeBytesDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryCacheTotalTimeoutsDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryCountDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryCPUTimeDataPoint(ts, 1, "query_data_source-val", "query_num_metrics-val", "query_dimension-val", "query_has_filters-val", 15, 25, "query_type-val", "query_remote_address-val", "query_id-val", "query_context-val", "query_num_dimensions-val", "query_interval-val", "query_duration-val") + + defaultMetricsCount++ + allMetricsCount++ + 
mb.RecordApachedruidQueryFailedCountDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryInterruptedCountDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryNodeBackpressureDataPoint(ts, 1, "query_status-val", "query_server-val", "query_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryNodeBytesDataPoint(ts, 1, "query_status-val", "query_server-val", "query_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryNodeTimeDataPoint(ts, 1, "query_status-val", "query_server-val", "query_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryNodeTtfbDataPoint(ts, 1, "query_status-val", "query_server-val", "query_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryPriorityDataPoint(ts, 1, "query_type-val", "query_data_source-val", "query_lane-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryRowLimitExceededCountDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQuerySegmentTimeDataPoint(ts, 1, "query_status-val", "query_segment-val", "query_id-val", "query_vectorized-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQuerySegmentAndCacheTimeDataPoint(ts, 1, "query_segment-val", "query_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQuerySegmentsCountDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQuerySuccessCountDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryTimeDataPoint(ts, 1, "query_data_source-val", "query_num_metrics-val", "query_dimension-val", "query_has_filters-val", 15, 25, "query_type-val", "query_remote_address-val", "query_id-val", "query_context-val", "query_num_dimensions-val", "query_interval-val", "query_duration-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryTimeoutCountDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidQueryWaitTimeDataPoint(ts, 1, "query_segment-val", "query_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentAddedBytesDataPoint(ts, 1, "segment_task_type-val", "segment_data_source-val", "segment_group_id-val", "segment_tags-val", "segment_task_id-val", "segment_interval-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentAssignSkippedCountDataPoint(ts, 1, "segment_description-val", "segment_tier-val", "segment_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentAssignedCountDataPoint(ts, 1, "segment_tier-val", "segment_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentCompactedBytesDataPoint(ts, 1, "segment_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentCompactedCountDataPoint(ts, 1, "segment_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentCountDataPoint(ts, 1, "segment_priority-val", "segment_tier-val", "segment_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentDeletedCountDataPoint(ts, 1, "segment_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentDropQueueCountDataPoint(ts, 1, "segment_server-val") + + defaultMetricsCount++ + allMetricsCount++ + 
mb.RecordApachedruidSegmentDropSkippedCountDataPoint(ts, 1, "segment_description-val", "segment_tier-val", "segment_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentDroppedCountDataPoint(ts, 1, "segment_tier-val", "segment_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentLoadQueueAssignedDataPoint(ts, 1, "segment_server-val", "segment_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentLoadQueueCancelledDataPoint(ts, 1, "segment_server-val", "segment_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentLoadQueueCountDataPoint(ts, 1, "segment_server-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentLoadQueueFailedDataPoint(ts, 1, "segment_server-val", "segment_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentLoadQueueSizeDataPoint(ts, 1, "segment_server-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentLoadQueueSuccessDataPoint(ts, 1, "segment_server-val", "segment_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentMaxDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentMoveSkippedCountDataPoint(ts, 1, "segment_description-val", "segment_tier-val", "segment_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentMovedBytesDataPoint(ts, 1, "segment_task_type-val", "segment_data_source-val", "segment_group_id-val", "segment_tags-val", "segment_task_id-val", "segment_interval-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentMovedCountDataPoint(ts, 1, "segment_tier-val", "segment_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentNukedBytesDataPoint(ts, 1, "segment_task_type-val", "segment_data_source-val", "segment_group_id-val", "segment_tags-val", "segment_task_id-val", "segment_interval-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentOverShadowedCountDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentPendingDeleteDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentRowCountAvgDataPoint(ts, 1, "segment_priority-val", "segment_tier-val", "segment_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentRowCountRangeCountDataPoint(ts, 1, "segment_priority-val", "segment_tier-val", "segment_data_source-val", "segment_range-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentScanActiveDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentScanPendingDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentSizeDataPoint(ts, 1, "segment_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentSkipCompactBytesDataPoint(ts, 1, "segment_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentSkipCompactCountDataPoint(ts, 1, "segment_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentUnavailableCountDataPoint(ts, 1, "segment_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentUnderReplicatedCountDataPoint(ts, 1, "segment_tier-val", 
"segment_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentUnneededCountDataPoint(ts, 1, "segment_tier-val", "segment_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentUsedDataPoint(ts, 1, "segment_priority-val", "segment_tier-val", "segment_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentUsedPercentDataPoint(ts, 1, "segment_priority-val", "segment_tier-val", "segment_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentWaitCompactBytesDataPoint(ts, 1, "segment_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSegmentWaitCompactCountDataPoint(ts, 1, "segment_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidServerviewInitTimeDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidServerviewSyncHealthyDataPoint(ts, 1, "serverview_tier-val", "serverview_server-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidServerviewSyncUnstableTimeDataPoint(ts, 1, "serverview_tier-val", "serverview_server-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSQLQueryBytesDataPoint(ts, 1, "sqlQuery_data_source-val", "sqlQuery_native_query_ids-val", "sqlQuery_engine-val", "sqlQuery_remote_address-val", "sqlQuery_id-val", "sqlQuery_success-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSQLQueryPlanningTimeMsDataPoint(ts, 1, "sqlQuery_data_source-val", "sqlQuery_native_query_ids-val", "sqlQuery_engine-val", "sqlQuery_remote_address-val", "sqlQuery_id-val", "sqlQuery_success-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSQLQueryTimeDataPoint(ts, 1, "sqlQuery_data_source-val", "sqlQuery_native_query_ids-val", "sqlQuery_engine-val", "sqlQuery_remote_address-val", "sqlQuery_id-val", "sqlQuery_success-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSubqueryByteLimitCountDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSubqueryFallbackCountDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSubqueryFallbackInsufficientTypeCountDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSubqueryFallbackUnknownReasonCountDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSubqueryRowLimitCountDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysCPUDataPoint(ts, 1, "sys_cpu_time-val", "sys_cpu_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysDiskQueueDataPoint(ts, 1, "sys_disk_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysDiskReadCountDataPoint(ts, 1, "sys_disk_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysDiskReadSizeDataPoint(ts, 1, "sys_disk_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysDiskTransferTimeDataPoint(ts, 1, "sys_disk_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysDiskWriteCountDataPoint(ts, 1, "sys_disk_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysDiskWriteSizeDataPoint(ts, 1, "sys_disk_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysFsFilesCountDataPoint(ts, 1, "sys_fs_dir_name-val", "sys_fs_dev_name-val") + + 
defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysFsFilesFreeDataPoint(ts, 1, "sys_fs_dir_name-val", "sys_fs_dev_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysFsMaxDataPoint(ts, 1, "sys_fs_dir_name-val", "sys_fs_dev_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysFsUsedDataPoint(ts, 1, "sys_fs_dir_name-val", "sys_fs_dev_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysLa1DataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysLa15DataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysLa5DataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysMemFreeDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysMemMaxDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysMemUsedDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysNetReadDroppedDataPoint(ts, 1, "sys_net_hwaddr-val", "sys_net_name-val", "sys_net_address-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysNetReadErrorsDataPoint(ts, 1, "sys_net_hwaddr-val", "sys_net_name-val", "sys_net_address-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysNetReadPacketsDataPoint(ts, 1, "sys_net_hwaddr-val", "sys_net_name-val", "sys_net_address-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysNetReadSizeDataPoint(ts, 1, "sys_net_hwaddr-val", "sys_net_name-val", "sys_net_address-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysNetWriteCollisionsDataPoint(ts, 1, "sys_net_hwaddr-val", "sys_net_name-val", "sys_net_address-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysNetWriteErrorsDataPoint(ts, 1, "sys_net_hwaddr-val", "sys_net_name-val", "sys_net_address-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysNetWritePacketsDataPoint(ts, 1, "sys_net_hwaddr-val", "sys_net_name-val", "sys_net_address-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysNetWriteSizeDataPoint(ts, 1, "sys_net_hwaddr-val", "sys_net_name-val", "sys_net_address-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysStorageUsedDataPoint(ts, 1, "sys_fs_dir_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysSwapFreeDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysSwapMaxDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysSwapPageInDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysSwapPageOutDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysTcpv4ActiveOpensDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysTcpv4AttemptFailsDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysTcpv4EstabResetsDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysTcpv4InErrsDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysTcpv4InSegsDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysTcpv4OutRstsDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysTcpv4OutSegsDataPoint(ts, 1) + + 
defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysTcpv4PassiveOpensDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysTcpv4RetransSegsDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidSysUptimeDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidTaskActionBatchAttemptsDataPoint(ts, 1, "task_interval-val", "task_data_source-val", "task_action_type-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidTaskActionBatchQueueTimeDataPoint(ts, 1, "task_interval-val", "task_data_source-val", "task_action_type-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidTaskActionBatchRunTimeDataPoint(ts, 1, "task_interval-val", "task_data_source-val", "task_action_type-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidTaskActionBatchSizeDataPoint(ts, 1, "task_interval-val", "task_data_source-val", "task_action_type-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidTaskActionFailedCountDataPoint(ts, 1, "task_type-val", "task_data_source-val", "task_action_type-val", "task_group_id-val", "task_tags-val", "task_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidTaskActionLogTimeDataPoint(ts, 1, "task_type-val", "task_data_source-val", "task_action_type-val", "task_group_id-val", "task_tags-val", "task_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidTaskActionRunTimeDataPoint(ts, 1, "task_type-val", "task_data_source-val", "task_action_type-val", "task_group_id-val", "task_tags-val", "task_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidTaskActionSuccessCountDataPoint(ts, 1, "task_type-val", "task_data_source-val", "task_action_type-val", "task_group_id-val", "task_tags-val", "task_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidTaskFailedCountDataPoint(ts, 1, "task_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidTaskPendingCountDataPoint(ts, 1, "task_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidTaskPendingTimeDataPoint(ts, 1, "task_type-val", "task_data_source-val", "task_group_id-val", "task_tags-val", "task_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidTaskRunTimeDataPoint(ts, 1, "task_type-val", "task_data_source-val", "task_group_id-val", "task_status-val", "task_tags-val", "task_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidTaskRunningCountDataPoint(ts, 1, "task_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidTaskSegmentAvailabilityWaitTimeDataPoint(ts, 1, "task_type-val", "task_data_source-val", "task_group_id-val", "task_segment_availability_confirmed-val", "task_tags-val", "task_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidTaskSuccessCountDataPoint(ts, 1, "task_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidTaskWaitingCountDataPoint(ts, 1, "task_data_source-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidTaskSlotBlacklistedCountDataPoint(ts, 1, "taskSlot_category-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidTaskSlotIdleCountDataPoint(ts, 1, "taskSlot_category-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidTaskSlotLazyCountDataPoint(ts, 1, 
"taskSlot_category-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidTaskSlotTotalCountDataPoint(ts, 1, "taskSlot_category-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidTaskSlotUsedCountDataPoint(ts, 1, "taskSlot_category-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidTierHistoricalCountDataPoint(ts, 1, "tier-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidTierReplicationFactorDataPoint(ts, 1, "tier-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidTierRequiredCapacityDataPoint(ts, 1, "tier-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidTierTotalCapacityDataPoint(ts, 1, "tier-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidWorkerTaskFailedCountDataPoint(ts, 1, "worker_category-val", "worker_version-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidWorkerTaskSuccessCountDataPoint(ts, 1, "worker_category-val", "worker_version-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidWorkerTaskSlotIdleCountDataPoint(ts, 1, "worker_category-val", "worker_version-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidWorkerTaskSlotTotalCountDataPoint(ts, 1, "worker_category-val", "worker_version-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidWorkerTaskSlotUsedCountDataPoint(ts, 1, "worker_category-val", "worker_version-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidZkConnectedDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordApachedruidZkReconnectTimeDataPoint(ts, 1) + + rb := mb.NewResourceBuilder() + rb.SetApachedruidClusterName("apachedruid.cluster.name-val") + rb.SetApachedruidNodeHost("apachedruid.node.host-val") + rb.SetApachedruidNodeService("apachedruid.node.service-val") + res := rb.Emit() + metrics := mb.Emit(WithResource(res)) + + if test.configSet == testSetNone { + assert.Equal(t, 0, metrics.ResourceMetrics().Len()) + return + } + + assert.Equal(t, 1, metrics.ResourceMetrics().Len()) + rm := metrics.ResourceMetrics().At(0) + assert.Equal(t, res, rm.Resource()) + assert.Equal(t, 1, rm.ScopeMetrics().Len()) + ms := rm.ScopeMetrics().At(0).Metrics() + if test.configSet == testSetDefault { + assert.Equal(t, defaultMetricsCount, ms.Len()) + } + if test.configSet == testSetAll { + assert.Equal(t, allMetricsCount, ms.Len()) + } + validatedMetrics := make(map[string]bool) + for i := 0; i < ms.Len(); i++ { + switch ms.At(i).Name() { + case "apachedruid.compact.segment_analyzer.fetch_and_process_millis": + assert.False(t, validatedMetrics["apachedruid.compact.segment_analyzer.fetch_and_process_millis"], "Found a duplicate in the metrics slice: apachedruid.compact.segment_analyzer.fetch_and_process_millis") + validatedMetrics["apachedruid.compact.segment_analyzer.fetch_and_process_millis"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Time taken to fetch and process segments to infer the schema for the compaction task to run.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + 
assert.True(t, ok) + assert.EqualValues(t, "compact_task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "compact_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "compact_group_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "compact_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "compact_task_id-val", attrVal.Str()) + case "apachedruid.compact.task.count": + assert.False(t, validatedMetrics["apachedruid.compact.task.count"], "Found a duplicate in the metrics slice: apachedruid.compact.task.count") + validatedMetrics["apachedruid.compact.task.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of tasks issued in the auto compaction run.", ms.At(i).Description()) + assert.Equal(t, "{tasks}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.compact_task.available_slot.count": + assert.False(t, validatedMetrics["apachedruid.compact_task.available_slot.count"], "Found a duplicate in the metrics slice: apachedruid.compact_task.available_slot.count") + validatedMetrics["apachedruid.compact_task.available_slot.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of available task slots that can be used for auto compaction tasks in the auto compaction run. 
This is the max number of task slots minus any currently running compaction tasks.", ms.At(i).Description()) + assert.Equal(t, "{slots}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.compact_task.max_slot.count": + assert.False(t, validatedMetrics["apachedruid.compact_task.max_slot.count"], "Found a duplicate in the metrics slice: apachedruid.compact_task.max_slot.count") + validatedMetrics["apachedruid.compact_task.max_slot.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Maximum number of task slots available for auto compaction tasks in the auto compaction run.", ms.At(i).Description()) + assert.Equal(t, "{slots}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.coordinator.global.time": + assert.False(t, validatedMetrics["apachedruid.coordinator.global.time"], "Found a duplicate in the metrics slice: apachedruid.coordinator.global.time") + validatedMetrics["apachedruid.coordinator.global.time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Approximate runtime of a full coordination cycle in milliseconds. The `dutyGroup` dimension indicates what type of coordination this run was. For example, Historical Management or Indexing.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("duty_group") + assert.True(t, ok) + assert.EqualValues(t, "coordinator_duty_group-val", attrVal.Str()) + case "apachedruid.coordinator.time": + assert.False(t, validatedMetrics["apachedruid.coordinator.time"], "Found a duplicate in the metrics slice: apachedruid.coordinator.time") + validatedMetrics["apachedruid.coordinator.time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Approximate Coordinator duty runtime in milliseconds.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("duty") + assert.True(t, ok) + assert.EqualValues(t, "coordinator_duty-val", attrVal.Str()) + case "apachedruid.ingest.bytes.received": + assert.False(t, validatedMetrics["apachedruid.ingest.bytes.received"], "Found a duplicate in the metrics slice: apachedruid.ingest.bytes.received") + validatedMetrics["apachedruid.ingest.bytes.received"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of bytes received by 
the `EventReceiverFirehose`.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("service_name") + assert.True(t, ok) + assert.EqualValues(t, "ingest_service_name-val", attrVal.Str()) + case "apachedruid.ingest.count": + assert.False(t, validatedMetrics["apachedruid.ingest.count"], "Found a duplicate in the metrics slice: apachedruid.ingest.count") + validatedMetrics["apachedruid.ingest.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Count of `1` every time an ingestion job runs (includes compaction jobs). Aggregate using dimensions.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_ingestion_mode") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_ingestion_mode-val", attrVal.Str()) + case "apachedruid.ingest.events.buffered": + assert.False(t, validatedMetrics["apachedruid.ingest.events.buffered"], "Found a duplicate in the metrics slice: apachedruid.ingest.events.buffered") + validatedMetrics["apachedruid.ingest.events.buffered"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of events queued in the `EventReceiverFirehose` buffer.", ms.At(i).Description()) + assert.Equal(t, "{events}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("service_name") + 
assert.True(t, ok) + assert.EqualValues(t, "ingest_service_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("buffer_capacity") + assert.True(t, ok) + assert.EqualValues(t, "ingest_buffer_capacity-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) + case "apachedruid.ingest.events.duplicate": + assert.False(t, validatedMetrics["apachedruid.ingest.events.duplicate"], "Found a duplicate in the metrics slice: apachedruid.ingest.events.duplicate") + validatedMetrics["apachedruid.ingest.events.duplicate"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of events rejected because the events are duplicated.", ms.At(i).Description()) + assert.Equal(t, "{events}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) + case "apachedruid.ingest.events.message_gap": + assert.False(t, validatedMetrics["apachedruid.ingest.events.message_gap"], "Found a duplicate in the metrics slice: apachedruid.ingest.events.message_gap") + validatedMetrics["apachedruid.ingest.events.message_gap"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Time gap in milliseconds between the latest ingested event timestamp and the current system timestamp of metrics emission. If the value is increasing but lag is low, Druid may not be receiving new data. 
This metric is reset as new tasks spawn up.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) + case "apachedruid.ingest.events.processed": + assert.False(t, validatedMetrics["apachedruid.ingest.events.processed"], "Found a duplicate in the metrics slice: apachedruid.ingest.events.processed") + validatedMetrics["apachedruid.ingest.events.processed"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of events processed per emission period.", ms.At(i).Description()) + assert.Equal(t, "{events}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) + case "apachedruid.ingest.events.processed_with_error": + assert.False(t, validatedMetrics["apachedruid.ingest.events.processed_with_error"], "Found a duplicate in the metrics slice: apachedruid.ingest.events.processed_with_error") + validatedMetrics["apachedruid.ingest.events.processed_with_error"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of events processed with some partial errors per emission period. 
Events processed with partial errors are counted towards both this metric and `ingest/events/processed`.", ms.At(i).Description()) + assert.Equal(t, "{events}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) + case "apachedruid.ingest.events.thrown_away": + assert.False(t, validatedMetrics["apachedruid.ingest.events.thrown_away"], "Found a duplicate in the metrics slice: apachedruid.ingest.events.thrown_away") + validatedMetrics["apachedruid.ingest.events.thrown_away"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of events rejected because they are null, or filtered by `transformSpec`, or outside one of `lateMessageRejectionPeriod`, `earlyMessageRejectionPeriod`, or `windowPeriod`.", ms.At(i).Description()) + assert.Equal(t, "{events}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) + case "apachedruid.ingest.events.unparseable": + assert.False(t, validatedMetrics["apachedruid.ingest.events.unparseable"], "Found a duplicate in the metrics slice: apachedruid.ingest.events.unparseable") + validatedMetrics["apachedruid.ingest.events.unparseable"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of events rejected because the events are unparseable.", ms.At(i).Description()) + assert.Equal(t, "{events}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, 
pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) + case "apachedruid.ingest.handoff.count": + assert.False(t, validatedMetrics["apachedruid.ingest.handoff.count"], "Found a duplicate in the metrics slice: apachedruid.ingest.handoff.count") + validatedMetrics["apachedruid.ingest.handoff.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of handoffs that happened.", ms.At(i).Description()) + assert.Equal(t, "{handoffs}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) + case "apachedruid.ingest.handoff.failed": + assert.False(t, validatedMetrics["apachedruid.ingest.handoff.failed"], "Found a duplicate in the metrics slice: apachedruid.ingest.handoff.failed") + validatedMetrics["apachedruid.ingest.handoff.failed"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of handoffs that failed.", ms.At(i).Description()) + assert.Equal(t, "{handoffs}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) + attrVal, ok = 
dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) + case "apachedruid.ingest.handoff.time": + assert.False(t, validatedMetrics["apachedruid.ingest.handoff.time"], "Found a duplicate in the metrics slice: apachedruid.ingest.handoff.time") + validatedMetrics["apachedruid.ingest.handoff.time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total number of milliseconds taken to handoff a set of segments.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) + case "apachedruid.ingest.input.bytes": + assert.False(t, validatedMetrics["apachedruid.ingest.input.bytes"], "Found a duplicate in the metrics slice: apachedruid.ingest.input.bytes") + validatedMetrics["apachedruid.ingest.input.bytes"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of bytes read from input sources, after decompression but prior to parsing. This covers all data read, including data that does not end up being fully processed and ingested. 
For example, this includes data that ends up being rejected for being unparseable or filtered out.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) + case "apachedruid.ingest.kafka.avg_lag": + assert.False(t, validatedMetrics["apachedruid.ingest.kafka.avg_lag"], "Found a duplicate in the metrics slice: apachedruid.ingest.kafka.avg_lag") + validatedMetrics["apachedruid.ingest.kafka.avg_lag"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Average lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers across all partitions. Minimum emission period for this metric is a minute.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("stream") + assert.True(t, ok) + assert.EqualValues(t, "ingest_stream-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + case "apachedruid.ingest.kafka.lag": + assert.False(t, validatedMetrics["apachedruid.ingest.kafka.lag"], "Found a duplicate in the metrics slice: apachedruid.ingest.kafka.lag") + validatedMetrics["apachedruid.ingest.kafka.lag"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers across all partitions. 
Minimum emission period for this metric is a minute.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("stream") + assert.True(t, ok) + assert.EqualValues(t, "ingest_stream-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + case "apachedruid.ingest.kafka.max_lag": + assert.False(t, validatedMetrics["apachedruid.ingest.kafka.max_lag"], "Found a duplicate in the metrics slice: apachedruid.ingest.kafka.max_lag") + validatedMetrics["apachedruid.ingest.kafka.max_lag"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Max lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers across all partitions. Minimum emission period for this metric is a minute.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("stream") + assert.True(t, ok) + assert.EqualValues(t, "ingest_stream-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + case "apachedruid.ingest.kafka.partition_lag": + assert.False(t, validatedMetrics["apachedruid.ingest.kafka.partition_lag"], "Found a duplicate in the metrics slice: apachedruid.ingest.kafka.partition_lag") + validatedMetrics["apachedruid.ingest.kafka.partition_lag"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Partition-wise lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers. 
Minimum emission period for this metric is a minute.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("partition") + assert.True(t, ok) + assert.EqualValues(t, "ingest_partition-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("stream") + assert.True(t, ok) + assert.EqualValues(t, "ingest_stream-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + case "apachedruid.ingest.kinesis.avg_lag.time": + assert.False(t, validatedMetrics["apachedruid.ingest.kinesis.avg_lag.time"], "Found a duplicate in the metrics slice: apachedruid.ingest.kinesis.avg_lag.time") + validatedMetrics["apachedruid.ingest.kinesis.avg_lag.time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Average lag time in milliseconds between the current message sequence number consumed by the Kinesis indexing tasks and latest sequence number in Kinesis across all shards. Minimum emission period for this metric is a minute.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("stream") + assert.True(t, ok) + assert.EqualValues(t, "ingest_stream-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + case "apachedruid.ingest.kinesis.lag.time": + assert.False(t, validatedMetrics["apachedruid.ingest.kinesis.lag.time"], "Found a duplicate in the metrics slice: apachedruid.ingest.kinesis.lag.time") + validatedMetrics["apachedruid.ingest.kinesis.lag.time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total lag time in milliseconds between the current message sequence number consumed by the Kinesis indexing tasks and latest sequence number in Kinesis across all shards. 
Minimum emission period for this metric is a minute.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("stream") + assert.True(t, ok) + assert.EqualValues(t, "ingest_stream-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + case "apachedruid.ingest.kinesis.max_lag.time": + assert.False(t, validatedMetrics["apachedruid.ingest.kinesis.max_lag.time"], "Found a duplicate in the metrics slice: apachedruid.ingest.kinesis.max_lag.time") + validatedMetrics["apachedruid.ingest.kinesis.max_lag.time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Max lag time in milliseconds between the current message sequence number consumed by the Kinesis indexing tasks and latest sequence number in Kinesis across all shards. Minimum emission period for this metric is a minute.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("stream") + assert.True(t, ok) + assert.EqualValues(t, "ingest_stream-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + case "apachedruid.ingest.kinesis.partition_lag.time": + assert.False(t, validatedMetrics["apachedruid.ingest.kinesis.partition_lag.time"], "Found a duplicate in the metrics slice: apachedruid.ingest.kinesis.partition_lag.time") + validatedMetrics["apachedruid.ingest.kinesis.partition_lag.time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Partition-wise lag time in milliseconds between the current message sequence number consumed by the Kinesis indexing tasks and latest sequence number in Kinesis. 
Minimum emission period for this metric is a minute.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("partition") + assert.True(t, ok) + assert.EqualValues(t, "ingest_partition-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("stream") + assert.True(t, ok) + assert.EqualValues(t, "ingest_stream-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + case "apachedruid.ingest.merge.cpu": + assert.False(t, validatedMetrics["apachedruid.ingest.merge.cpu"], "Found a duplicate in the metrics slice: apachedruid.ingest.merge.cpu") + validatedMetrics["apachedruid.ingest.merge.cpu"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "CPU time in Nanoseconds spent on merging intermediate segments.", ms.At(i).Description()) + assert.Equal(t, "ns", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) + case "apachedruid.ingest.merge.time": + assert.False(t, validatedMetrics["apachedruid.ingest.merge.time"], "Found a duplicate in the metrics slice: apachedruid.ingest.merge.time") + validatedMetrics["apachedruid.ingest.merge.time"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Milliseconds spent merging intermediate segments.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, 
"ingest_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) + case "apachedruid.ingest.notices.queue_size": + assert.False(t, validatedMetrics["apachedruid.ingest.notices.queue_size"], "Found a duplicate in the metrics slice: apachedruid.ingest.notices.queue_size") + validatedMetrics["apachedruid.ingest.notices.queue_size"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of pending notices to be processed by the coordinator.", ms.At(i).Description()) + assert.Equal(t, "{notices}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + case "apachedruid.ingest.notices.time": + assert.False(t, validatedMetrics["apachedruid.ingest.notices.time"], "Found a duplicate in the metrics slice: apachedruid.ingest.notices.time") + validatedMetrics["apachedruid.ingest.notices.time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Milliseconds taken to process a notice by the supervisor.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + case "apachedruid.ingest.pause.time": + assert.False(t, validatedMetrics["apachedruid.ingest.pause.time"], "Found a duplicate in the metrics slice: apachedruid.ingest.pause.time") + validatedMetrics["apachedruid.ingest.pause.time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Milliseconds spent by a task in a paused state without ingesting.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + 
assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + case "apachedruid.ingest.persists.back_pressure": + assert.False(t, validatedMetrics["apachedruid.ingest.persists.back_pressure"], "Found a duplicate in the metrics slice: apachedruid.ingest.persists.back_pressure") + validatedMetrics["apachedruid.ingest.persists.back_pressure"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Milliseconds spent creating persist tasks and blocking waiting for them to finish.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) + case "apachedruid.ingest.persists.count": + assert.False(t, validatedMetrics["apachedruid.ingest.persists.count"], "Found a duplicate in the metrics slice: apachedruid.ingest.persists.count") + validatedMetrics["apachedruid.ingest.persists.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of times persist occurred.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) + case "apachedruid.ingest.persists.cpu": + assert.False(t, validatedMetrics["apachedruid.ingest.persists.cpu"], "Found a duplicate in the metrics slice: apachedruid.ingest.persists.cpu") + validatedMetrics["apachedruid.ingest.persists.cpu"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + 
assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "CPU time in nanoseconds spent on doing intermediate persist.", ms.At(i).Description()) + assert.Equal(t, "ns", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) + case "apachedruid.ingest.persists.failed": + assert.False(t, validatedMetrics["apachedruid.ingest.persists.failed"], "Found a duplicate in the metrics slice: apachedruid.ingest.persists.failed") + validatedMetrics["apachedruid.ingest.persists.failed"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of persists that failed.", ms.At(i).Description()) + assert.Equal(t, "{persists}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) + case "apachedruid.ingest.persists.time": + assert.False(t, validatedMetrics["apachedruid.ingest.persists.time"], "Found a duplicate in the metrics slice: apachedruid.ingest.persists.time") + validatedMetrics["apachedruid.ingest.persists.time"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Milliseconds spent doing intermediate persist.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, 
dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) + case "apachedruid.ingest.rows.output": + assert.False(t, validatedMetrics["apachedruid.ingest.rows.output"], "Found a duplicate in the metrics slice: apachedruid.ingest.rows.output") + validatedMetrics["apachedruid.ingest.rows.output"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of Druid rows persisted.", ms.At(i).Description()) + assert.Equal(t, "{rows}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) + case "apachedruid.ingest.segments.count": + assert.False(t, validatedMetrics["apachedruid.ingest.segments.count"], "Found a duplicate in the metrics slice: apachedruid.ingest.segments.count") + validatedMetrics["apachedruid.ingest.segments.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Count of final segments created by job (includes tombstones).", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + 
assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_ingestion_mode") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_ingestion_mode-val", attrVal.Str()) + case "apachedruid.ingest.shuffle.bytes": + assert.False(t, validatedMetrics["apachedruid.ingest.shuffle.bytes"], "Found a duplicate in the metrics slice: apachedruid.ingest.shuffle.bytes") + validatedMetrics["apachedruid.ingest.shuffle.bytes"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of bytes shuffled per emission period.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("supervisor_task_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_supervisor_task_id-val", attrVal.Str()) + case "apachedruid.ingest.shuffle.requests": + assert.False(t, validatedMetrics["apachedruid.ingest.shuffle.requests"], "Found a duplicate in the metrics slice: apachedruid.ingest.shuffle.requests") + validatedMetrics["apachedruid.ingest.shuffle.requests"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of shuffle requests per emission period.", ms.At(i).Description()) + assert.Equal(t, "{requests}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("supervisor_task_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_supervisor_task_id-val", attrVal.Str()) + case "apachedruid.ingest.sink.count": + assert.False(t, validatedMetrics["apachedruid.ingest.sink.count"], "Found a duplicate in the metrics slice: apachedruid.ingest.sink.count") + validatedMetrics["apachedruid.ingest.sink.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of sinks not handed off.", ms.At(i).Description()) + assert.Equal(t, "{sinks}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "ingest_tags-val", 
attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) + case "apachedruid.ingest.tombstones.count": + assert.False(t, validatedMetrics["apachedruid.ingest.tombstones.count"], "Found a duplicate in the metrics slice: apachedruid.ingest.tombstones.count") + validatedMetrics["apachedruid.ingest.tombstones.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Count of tombstones created by job.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_ingestion_mode") + assert.True(t, ok) + assert.EqualValues(t, "ingest_task_ingestion_mode-val", attrVal.Str()) + case "apachedruid.interval.compacted.count": + assert.False(t, validatedMetrics["apachedruid.interval.compacted.count"], "Found a duplicate in the metrics slice: apachedruid.interval.compacted.count") + validatedMetrics["apachedruid.interval.compacted.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Total number of intervals of this datasource that are already compacted with the spec set in the auto compaction config.", ms.At(i).Description()) + assert.Equal(t, "{intervals}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "interval_data_source-val", attrVal.Str()) + case "apachedruid.interval.skip_compact.count": + assert.False(t, validatedMetrics["apachedruid.interval.skip_compact.count"], "Found a duplicate in the metrics slice: apachedruid.interval.skip_compact.count") + validatedMetrics["apachedruid.interval.skip_compact.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total number of intervals of this datasource that are skipped (not eligible for auto compaction) by the auto compaction.", ms.At(i).Description()) + assert.Equal(t, "{intervals}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, 
dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "interval_data_source-val", attrVal.Str()) + case "apachedruid.interval.wait_compact.count": + assert.False(t, validatedMetrics["apachedruid.interval.wait_compact.count"], "Found a duplicate in the metrics slice: apachedruid.interval.wait_compact.count") + validatedMetrics["apachedruid.interval.wait_compact.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total number of intervals of this datasource waiting to be compacted by the auto compaction (only consider intervals/segments that are eligible for auto compaction).", ms.At(i).Description()) + assert.Equal(t, "{intervals}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "interval_data_source-val", attrVal.Str()) + case "apachedruid.jetty.num_open_connections": + assert.False(t, validatedMetrics["apachedruid.jetty.num_open_connections"], "Found a duplicate in the metrics slice: apachedruid.jetty.num_open_connections") + validatedMetrics["apachedruid.jetty.num_open_connections"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of open jetty connections.", ms.At(i).Description()) + assert.Equal(t, "{connections}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.jetty.thread_pool.busy": + assert.False(t, validatedMetrics["apachedruid.jetty.thread_pool.busy"], "Found a duplicate in the metrics slice: apachedruid.jetty.thread_pool.busy") + validatedMetrics["apachedruid.jetty.thread_pool.busy"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of busy threads that has work to do from the worker queue.", ms.At(i).Description()) + assert.Equal(t, "{threads}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.jetty.thread_pool.idle": + assert.False(t, validatedMetrics["apachedruid.jetty.thread_pool.idle"], "Found a duplicate in the metrics slice: apachedruid.jetty.thread_pool.idle") + validatedMetrics["apachedruid.jetty.thread_pool.idle"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of idle threads.", ms.At(i).Description()) + assert.Equal(t, "{threads}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) 
+ assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.jetty.thread_pool.is_low_on_threads": + assert.False(t, validatedMetrics["apachedruid.jetty.thread_pool.is_low_on_threads"], "Found a duplicate in the metrics slice: apachedruid.jetty.thread_pool.is_low_on_threads") + validatedMetrics["apachedruid.jetty.thread_pool.is_low_on_threads"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "A rough indicator of whether number of total workable threads allocated is enough to handle the works in the work queue.", ms.At(i).Description()) + assert.Equal(t, "{threads}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.jetty.thread_pool.max": + assert.False(t, validatedMetrics["apachedruid.jetty.thread_pool.max"], "Found a duplicate in the metrics slice: apachedruid.jetty.thread_pool.max") + validatedMetrics["apachedruid.jetty.thread_pool.max"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of maximum threads allocatable.", ms.At(i).Description()) + assert.Equal(t, "{threads}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.jetty.thread_pool.min": + assert.False(t, validatedMetrics["apachedruid.jetty.thread_pool.min"], "Found a duplicate in the metrics slice: apachedruid.jetty.thread_pool.min") + validatedMetrics["apachedruid.jetty.thread_pool.min"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of minimum threads allocatable.", ms.At(i).Description()) + assert.Equal(t, "{threads}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.jetty.thread_pool.queue_size": + assert.False(t, validatedMetrics["apachedruid.jetty.thread_pool.queue_size"], "Found a duplicate in the metrics slice: apachedruid.jetty.thread_pool.queue_size") + validatedMetrics["apachedruid.jetty.thread_pool.queue_size"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Size of the worker queue.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.jetty.thread_pool.total": + assert.False(t, validatedMetrics["apachedruid.jetty.thread_pool.total"], "Found a duplicate in the metrics slice: apachedruid.jetty.thread_pool.total") + validatedMetrics["apachedruid.jetty.thread_pool.total"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, 
ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of total workable threads allocated.", ms.At(i).Description()) + assert.Equal(t, "{threads}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.jvm.bufferpool.capacity": + assert.False(t, validatedMetrics["apachedruid.jvm.bufferpool.capacity"], "Found a duplicate in the metrics slice: apachedruid.jvm.bufferpool.capacity") + validatedMetrics["apachedruid.jvm.bufferpool.capacity"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Bufferpool capacity.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("bufferpool_name") + assert.True(t, ok) + assert.EqualValues(t, "jvm_bufferpool_name-val", attrVal.Str()) + case "apachedruid.jvm.bufferpool.count": + assert.False(t, validatedMetrics["apachedruid.jvm.bufferpool.count"], "Found a duplicate in the metrics slice: apachedruid.jvm.bufferpool.count") + validatedMetrics["apachedruid.jvm.bufferpool.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Bufferpool count.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("bufferpool_name") + assert.True(t, ok) + assert.EqualValues(t, "jvm_bufferpool_name-val", attrVal.Str()) + case "apachedruid.jvm.bufferpool.used": + assert.False(t, validatedMetrics["apachedruid.jvm.bufferpool.used"], "Found a duplicate in the metrics slice: apachedruid.jvm.bufferpool.used") + validatedMetrics["apachedruid.jvm.bufferpool.used"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Bufferpool used.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("bufferpool_name") + assert.True(t, ok) + assert.EqualValues(t, "jvm_bufferpool_name-val", attrVal.Str()) + case "apachedruid.jvm.gc.count": + assert.False(t, validatedMetrics["apachedruid.jvm.gc.count"], "Found a duplicate in the metrics slice: apachedruid.jvm.gc.count") + validatedMetrics["apachedruid.jvm.gc.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Garbage collection count.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, 
pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("gc_gen") + assert.True(t, ok) + assert.EqualValues(t, "jvm_gc_gen-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("gc_name") + assert.True(t, ok) + assert.EqualValues(t, "jvm_gc_name-val", attrVal.Str()) + case "apachedruid.jvm.gc.cpu": + assert.False(t, validatedMetrics["apachedruid.jvm.gc.cpu"], "Found a duplicate in the metrics slice: apachedruid.jvm.gc.cpu") + validatedMetrics["apachedruid.jvm.gc.cpu"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Count of CPU time in Nanoseconds spent on garbage collection. Note, `jvm/gc/cpu` represents the total time over multiple GC cycles; divide by `jvm/gc/count` to get the mean GC time per cycle.", ms.At(i).Description()) + assert.Equal(t, "ns", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("gc_gen") + assert.True(t, ok) + assert.EqualValues(t, "jvm_gc_gen-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("gc_name") + assert.True(t, ok) + assert.EqualValues(t, "jvm_gc_name-val", attrVal.Str()) + case "apachedruid.jvm.mem.committed": + assert.False(t, validatedMetrics["apachedruid.jvm.mem.committed"], "Found a duplicate in the metrics slice: apachedruid.jvm.mem.committed") + validatedMetrics["apachedruid.jvm.mem.committed"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Committed memory.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("mem_kind") + assert.True(t, ok) + assert.EqualValues(t, "jvm_mem_kind-val", attrVal.Str()) + case "apachedruid.jvm.mem.init": + assert.False(t, validatedMetrics["apachedruid.jvm.mem.init"], "Found a duplicate in the metrics slice: apachedruid.jvm.mem.init") + validatedMetrics["apachedruid.jvm.mem.init"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Initial memory.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("mem_kind") + assert.True(t, ok) + assert.EqualValues(t, "jvm_mem_kind-val", attrVal.Str()) + case "apachedruid.jvm.mem.max": + assert.False(t, 
validatedMetrics["apachedruid.jvm.mem.max"], "Found a duplicate in the metrics slice: apachedruid.jvm.mem.max") + validatedMetrics["apachedruid.jvm.mem.max"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Max memory.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("mem_kind") + assert.True(t, ok) + assert.EqualValues(t, "jvm_mem_kind-val", attrVal.Str()) + case "apachedruid.jvm.mem.used": + assert.False(t, validatedMetrics["apachedruid.jvm.mem.used"], "Found a duplicate in the metrics slice: apachedruid.jvm.mem.used") + validatedMetrics["apachedruid.jvm.mem.used"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Used memory.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("mem_kind") + assert.True(t, ok) + assert.EqualValues(t, "jvm_mem_kind-val", attrVal.Str()) + case "apachedruid.jvm.pool.committed": + assert.False(t, validatedMetrics["apachedruid.jvm.pool.committed"], "Found a duplicate in the metrics slice: apachedruid.jvm.pool.committed") + validatedMetrics["apachedruid.jvm.pool.committed"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Committed pool.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("pool_name") + assert.True(t, ok) + assert.EqualValues(t, "jvm_pool_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("pool_kind") + assert.True(t, ok) + assert.EqualValues(t, "jvm_pool_kind-val", attrVal.Str()) + case "apachedruid.jvm.pool.init": + assert.False(t, validatedMetrics["apachedruid.jvm.pool.init"], "Found a duplicate in the metrics slice: apachedruid.jvm.pool.init") + validatedMetrics["apachedruid.jvm.pool.init"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Initial pool.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("pool_name") + assert.True(t, ok) + assert.EqualValues(t, "jvm_pool_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("pool_kind") + assert.True(t, ok) + assert.EqualValues(t, "jvm_pool_kind-val", attrVal.Str()) + case "apachedruid.jvm.pool.max": + assert.False(t, 
validatedMetrics["apachedruid.jvm.pool.max"], "Found a duplicate in the metrics slice: apachedruid.jvm.pool.max") + validatedMetrics["apachedruid.jvm.pool.max"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Max pool.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("pool_name") + assert.True(t, ok) + assert.EqualValues(t, "jvm_pool_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("pool_kind") + assert.True(t, ok) + assert.EqualValues(t, "jvm_pool_kind-val", attrVal.Str()) + case "apachedruid.jvm.pool.used": + assert.False(t, validatedMetrics["apachedruid.jvm.pool.used"], "Found a duplicate in the metrics slice: apachedruid.jvm.pool.used") + validatedMetrics["apachedruid.jvm.pool.used"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Pool used.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("pool_name") + assert.True(t, ok) + assert.EqualValues(t, "jvm_pool_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("pool_kind") + assert.True(t, ok) + assert.EqualValues(t, "jvm_pool_kind-val", attrVal.Str()) + case "apachedruid.kill.pending_segments.count": + assert.False(t, validatedMetrics["apachedruid.kill.pending_segments.count"], "Found a duplicate in the metrics slice: apachedruid.kill.pending_segments.count") + validatedMetrics["apachedruid.kill.pending_segments.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of stale pending segments deleted from the metadata store.", ms.At(i).Description()) + assert.Equal(t, "{segments}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "kill_data_source-val", attrVal.Str()) + case "apachedruid.kill.task.count": + assert.False(t, validatedMetrics["apachedruid.kill.task.count"], "Found a duplicate in the metrics slice: apachedruid.kill.task.count") + validatedMetrics["apachedruid.kill.task.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of tasks issued in the auto kill run.", ms.At(i).Description()) + assert.Equal(t, "{tasks}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, 
pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.kill_task.available_slot.count": + assert.False(t, validatedMetrics["apachedruid.kill_task.available_slot.count"], "Found a duplicate in the metrics slice: apachedruid.kill_task.available_slot.count") + validatedMetrics["apachedruid.kill_task.available_slot.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of available task slots that can be used for auto kill tasks in the auto kill run. This is the max number of task slots minus any currently running auto kill tasks.", ms.At(i).Description()) + assert.Equal(t, "{slots}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.kill_task.max_slot.count": + assert.False(t, validatedMetrics["apachedruid.kill_task.max_slot.count"], "Found a duplicate in the metrics slice: apachedruid.kill_task.max_slot.count") + validatedMetrics["apachedruid.kill_task.max_slot.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Maximum number of task slots available for auto kill tasks in the auto kill run.", ms.At(i).Description()) + assert.Equal(t, "{slots}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.merge_buffer.pending_requests": + assert.False(t, validatedMetrics["apachedruid.merge_buffer.pending_requests"], "Found a duplicate in the metrics slice: apachedruid.merge_buffer.pending_requests") + validatedMetrics["apachedruid.merge_buffer.pending_requests"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of requests waiting to acquire a batch of buffers from the merge buffer pool.", ms.At(i).Description()) + assert.Equal(t, "{requests}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.metadata.kill.audit.count": + assert.False(t, validatedMetrics["apachedruid.metadata.kill.audit.count"], "Found a duplicate in the metrics slice: apachedruid.metadata.kill.audit.count") + validatedMetrics["apachedruid.metadata.kill.audit.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Total number of audit logs that were automatically deleted from metadata store per each Coordinator kill audit duty run. This metric can help adjust `druid.coordinator.kill.audit.durationToRetain` configuration based on whether more or less audit logs need to be deleted per cycle. 
This metric is emitted only when `druid.coordinator.kill.audit.on` is set to true.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.metadata.kill.compaction.count": + assert.False(t, validatedMetrics["apachedruid.metadata.kill.compaction.count"], "Found a duplicate in the metrics slice: apachedruid.metadata.kill.compaction.count") + validatedMetrics["apachedruid.metadata.kill.compaction.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total number of compaction configurations that were automatically deleted from metadata store per each Coordinator kill compaction configuration duty run. This metric is only emitted when `druid.coordinator.kill.compaction.on` is set to true.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.metadata.kill.datasource.count": + assert.False(t, validatedMetrics["apachedruid.metadata.kill.datasource.count"], "Found a duplicate in the metrics slice: apachedruid.metadata.kill.datasource.count") + validatedMetrics["apachedruid.metadata.kill.datasource.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total number of datasource metadata that were automatically deleted from metadata store per each Coordinator kill datasource duty run. Note that datasource metadata only exists for datasource created from supervisor. This metric can help adjust `druid.coordinator.kill.datasource.durationToRetain` configuration based on whether more or less datasource metadata need to be deleted per cycle. This metric is only emitted when `druid.coordinator.kill.datasource.on` is set to true.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.metadata.kill.rule.count": + assert.False(t, validatedMetrics["apachedruid.metadata.kill.rule.count"], "Found a duplicate in the metrics slice: apachedruid.metadata.kill.rule.count") + validatedMetrics["apachedruid.metadata.kill.rule.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total number of rules that were automatically deleted from metadata store per each Coordinator kill rule duty run. This metric can help adjust `druid.coordinator.kill.rule.durationToRetain` configuration based on whether more or less rules need to be deleted per cycle. 
This metric is only emitted when `druid.coordinator.kill.rule.on` is set to true.", ms.At(i).Description()) + assert.Equal(t, "{rules}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.metadata.kill.supervisor.count": + assert.False(t, validatedMetrics["apachedruid.metadata.kill.supervisor.count"], "Found a duplicate in the metrics slice: apachedruid.metadata.kill.supervisor.count") + validatedMetrics["apachedruid.metadata.kill.supervisor.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total number of terminated supervisors that were automatically deleted from metadata store per each Coordinator kill supervisor duty run. This metric can help adjust `druid.coordinator.kill.supervisor.durationToRetain` configuration based on whether more or less terminated supervisors need to be deleted per cycle. This metric is only emitted when `druid.coordinator.kill.supervisor.on` is set to true.", ms.At(i).Description()) + assert.Equal(t, "{supervisors}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.metadatacache.init.time": + assert.False(t, validatedMetrics["apachedruid.metadatacache.init.time"], "Found a duplicate in the metrics slice: apachedruid.metadatacache.init.time") + validatedMetrics["apachedruid.metadatacache.init.time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Time taken to initialize the broker segment metadata cache. 
Useful to detect if brokers are taking too long to start.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.metadatacache.refresh.count": + assert.False(t, validatedMetrics["apachedruid.metadatacache.refresh.count"], "Found a duplicate in the metrics slice: apachedruid.metadatacache.refresh.count") + validatedMetrics["apachedruid.metadatacache.refresh.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of segments to refresh in broker segment metadata cache.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.metadatacache.refresh.time": + assert.False(t, validatedMetrics["apachedruid.metadatacache.refresh.time"], "Found a duplicate in the metrics slice: apachedruid.metadatacache.refresh.time") + validatedMetrics["apachedruid.metadatacache.refresh.time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Time taken to refresh segments in broker segment metadata cache.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.query.byte_limit.exceeded.count": + assert.False(t, validatedMetrics["apachedruid.query.byte_limit.exceeded.count"], "Found a duplicate in the metrics slice: apachedruid.query.byte_limit.exceeded.count") + validatedMetrics["apachedruid.query.byte_limit.exceeded.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of queries whose inlined subquery results exceeded the given byte limit.", ms.At(i).Description()) + assert.Equal(t, "{queries}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.query.bytes": + assert.False(t, validatedMetrics["apachedruid.query.bytes"], "Found a duplicate in the metrics slice: apachedruid.query.bytes") + validatedMetrics["apachedruid.query.bytes"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The total number of bytes returned to the requesting client in the query response from the broker. 
Other services report the total bytes for their portion of the query.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "query_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("num_metrics") + assert.True(t, ok) + assert.EqualValues(t, "query_num_metrics-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("dimension") + assert.True(t, ok) + assert.EqualValues(t, "query_dimension-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("has_filters") + assert.True(t, ok) + assert.EqualValues(t, "query_has_filters-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("threshold") + assert.True(t, ok) + assert.EqualValues(t, 15, attrVal.Int()) + attrVal, ok = dp.Attributes().Get("num_complex_metrics") + assert.True(t, ok) + assert.EqualValues(t, 25, attrVal.Int()) + attrVal, ok = dp.Attributes().Get("type") + assert.True(t, ok) + assert.EqualValues(t, "query_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("remote_address") + assert.True(t, ok) + assert.EqualValues(t, "query_remote_address-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("id") + assert.True(t, ok) + assert.EqualValues(t, "query_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("context") + assert.True(t, ok) + assert.EqualValues(t, "query_context-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("num_dimensions") + assert.True(t, ok) + assert.EqualValues(t, "query_num_dimensions-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("interval") + assert.True(t, ok) + assert.EqualValues(t, "query_interval-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("duration") + assert.True(t, ok) + assert.EqualValues(t, "query_duration-val", attrVal.Str()) + case "apachedruid.query.cache.delta.average_bytes": + assert.False(t, validatedMetrics["apachedruid.query.cache.delta.average_bytes"], "Found a duplicate in the metrics slice: apachedruid.query.cache.delta.average_bytes") + validatedMetrics["apachedruid.query.cache.delta.average_bytes"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Average cache entry byte size.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.query.cache.delta.errors": + assert.False(t, validatedMetrics["apachedruid.query.cache.delta.errors"], "Found a duplicate in the metrics slice: apachedruid.query.cache.delta.errors") + validatedMetrics["apachedruid.query.cache.delta.errors"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of cache errors.", ms.At(i).Description()) + assert.Equal(t, "{errors}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + 
assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.query.cache.delta.evictions": + assert.False(t, validatedMetrics["apachedruid.query.cache.delta.evictions"], "Found a duplicate in the metrics slice: apachedruid.query.cache.delta.evictions") + validatedMetrics["apachedruid.query.cache.delta.evictions"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of cache evictions.", ms.At(i).Description()) + assert.Equal(t, "{evictions}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.query.cache.delta.hit_rate": + assert.False(t, validatedMetrics["apachedruid.query.cache.delta.hit_rate"], "Found a duplicate in the metrics slice: apachedruid.query.cache.delta.hit_rate") + validatedMetrics["apachedruid.query.cache.delta.hit_rate"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Cache hit rate.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + case "apachedruid.query.cache.delta.hits": + assert.False(t, validatedMetrics["apachedruid.query.cache.delta.hits"], "Found a duplicate in the metrics slice: apachedruid.query.cache.delta.hits") + validatedMetrics["apachedruid.query.cache.delta.hits"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of cache hits.", ms.At(i).Description()) + assert.Equal(t, "{hits}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.query.cache.delta.misses": + assert.False(t, validatedMetrics["apachedruid.query.cache.delta.misses"], "Found a duplicate in the metrics slice: apachedruid.query.cache.delta.misses") + validatedMetrics["apachedruid.query.cache.delta.misses"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of cache misses.", ms.At(i).Description()) + assert.Equal(t, "{misses}", ms.At(i).Unit()) + assert.Equal(t, true, 
ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.query.cache.delta.num_entries": + assert.False(t, validatedMetrics["apachedruid.query.cache.delta.num_entries"], "Found a duplicate in the metrics slice: apachedruid.query.cache.delta.num_entries") + validatedMetrics["apachedruid.query.cache.delta.num_entries"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of cache entries.", ms.At(i).Description()) + assert.Equal(t, "{entries}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.query.cache.delta.put.error": + assert.False(t, validatedMetrics["apachedruid.query.cache.delta.put.error"], "Found a duplicate in the metrics slice: apachedruid.query.cache.delta.put.error") + validatedMetrics["apachedruid.query.cache.delta.put.error"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of new cache entries that could not be cached due to errors.", ms.At(i).Description()) + assert.Equal(t, "{errors}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.query.cache.delta.put.ok": + assert.False(t, validatedMetrics["apachedruid.query.cache.delta.put.ok"], "Found a duplicate in the metrics slice: apachedruid.query.cache.delta.put.ok") + validatedMetrics["apachedruid.query.cache.delta.put.ok"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of new cache entries successfully cached.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.query.cache.delta.put.oversized": + assert.False(t, validatedMetrics["apachedruid.query.cache.delta.put.oversized"], "Found a duplicate in the metrics slice: apachedruid.query.cache.delta.put.oversized") + validatedMetrics["apachedruid.query.cache.delta.put.oversized"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + 
assert.Equal(t, "Number of potential new cache entries that were skipped due to being too large (based on `druid.{broker,historical,realtime}.cache.maxEntrySize` properties).", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.query.cache.delta.size_bytes": + assert.False(t, validatedMetrics["apachedruid.query.cache.delta.size_bytes"], "Found a duplicate in the metrics slice: apachedruid.query.cache.delta.size_bytes") + validatedMetrics["apachedruid.query.cache.delta.size_bytes"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Size in bytes of cache entries.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.query.cache.delta.timeouts": + assert.False(t, validatedMetrics["apachedruid.query.cache.delta.timeouts"], "Found a duplicate in the metrics slice: apachedruid.query.cache.delta.timeouts") + validatedMetrics["apachedruid.query.cache.delta.timeouts"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of cache timeouts.", ms.At(i).Description()) + assert.Equal(t, "{timeouts}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.query.cache.memcached.delta": + assert.False(t, validatedMetrics["apachedruid.query.cache.memcached.delta"], "Found a duplicate in the metrics slice: apachedruid.query.cache.memcached.delta") + validatedMetrics["apachedruid.query.cache.memcached.delta"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Cache metrics unique to memcached (only if `druid.cache.type=memcached`) as their delta from the prior event emission.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.query.cache.memcached.total": + assert.False(t, 
validatedMetrics["apachedruid.query.cache.memcached.total"], "Found a duplicate in the metrics slice: apachedruid.query.cache.memcached.total") + validatedMetrics["apachedruid.query.cache.memcached.total"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Cache metrics unique to memcached (only if `druid.cache.type=memcached`) as their actual values.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.query.cache.total.average_bytes": + assert.False(t, validatedMetrics["apachedruid.query.cache.total.average_bytes"], "Found a duplicate in the metrics slice: apachedruid.query.cache.total.average_bytes") + validatedMetrics["apachedruid.query.cache.total.average_bytes"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Average cache entry byte size.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.query.cache.total.errors": + assert.False(t, validatedMetrics["apachedruid.query.cache.total.errors"], "Found a duplicate in the metrics slice: apachedruid.query.cache.total.errors") + validatedMetrics["apachedruid.query.cache.total.errors"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of cache errors.", ms.At(i).Description()) + assert.Equal(t, "{errors}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.query.cache.total.evictions": + assert.False(t, validatedMetrics["apachedruid.query.cache.total.evictions"], "Found a duplicate in the metrics slice: apachedruid.query.cache.total.evictions") + validatedMetrics["apachedruid.query.cache.total.evictions"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of cache evictions.", ms.At(i).Description()) + assert.Equal(t, "{evictions}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.query.cache.total.hit_rate": + assert.False(t, validatedMetrics["apachedruid.query.cache.total.hit_rate"], "Found a duplicate in the metrics slice: apachedruid.query.cache.total.hit_rate") + validatedMetrics["apachedruid.query.cache.total.hit_rate"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Cache hit rate.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := 
ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + case "apachedruid.query.cache.total.hits": + assert.False(t, validatedMetrics["apachedruid.query.cache.total.hits"], "Found a duplicate in the metrics slice: apachedruid.query.cache.total.hits") + validatedMetrics["apachedruid.query.cache.total.hits"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of cache hits.", ms.At(i).Description()) + assert.Equal(t, "{hits}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.query.cache.total.misses": + assert.False(t, validatedMetrics["apachedruid.query.cache.total.misses"], "Found a duplicate in the metrics slice: apachedruid.query.cache.total.misses") + validatedMetrics["apachedruid.query.cache.total.misses"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of cache misses.", ms.At(i).Description()) + assert.Equal(t, "{misses}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.query.cache.total.num_entries": + assert.False(t, validatedMetrics["apachedruid.query.cache.total.num_entries"], "Found a duplicate in the metrics slice: apachedruid.query.cache.total.num_entries") + validatedMetrics["apachedruid.query.cache.total.num_entries"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of cache entries.", ms.At(i).Description()) + assert.Equal(t, "{entries}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.query.cache.total.put.error": + assert.False(t, validatedMetrics["apachedruid.query.cache.total.put.error"], "Found a duplicate in the metrics slice: apachedruid.query.cache.total.put.error") + validatedMetrics["apachedruid.query.cache.total.put.error"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of new cache entries that could not be cached due to errors.", ms.At(i).Description()) + assert.Equal(t, "{errors}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.query.cache.total.put.ok": + assert.False(t, validatedMetrics["apachedruid.query.cache.total.put.ok"], "Found a duplicate in the metrics slice: apachedruid.query.cache.total.put.ok") + validatedMetrics["apachedruid.query.cache.total.put.ok"] = 
true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of new cache entries successfully cached.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.query.cache.total.put.oversized": + assert.False(t, validatedMetrics["apachedruid.query.cache.total.put.oversized"], "Found a duplicate in the metrics slice: apachedruid.query.cache.total.put.oversized") + validatedMetrics["apachedruid.query.cache.total.put.oversized"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of potential new cache entries that were skipped due to being too large (based on `druid.{broker,historical,realtime}.cache.maxEntrySize` properties).", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.query.cache.total.size_bytes": + assert.False(t, validatedMetrics["apachedruid.query.cache.total.size_bytes"], "Found a duplicate in the metrics slice: apachedruid.query.cache.total.size_bytes") + validatedMetrics["apachedruid.query.cache.total.size_bytes"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Size in bytes of cache entries.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.query.cache.total.timeouts": + assert.False(t, validatedMetrics["apachedruid.query.cache.total.timeouts"], "Found a duplicate in the metrics slice: apachedruid.query.cache.total.timeouts") + validatedMetrics["apachedruid.query.cache.total.timeouts"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of cache timeouts.", ms.At(i).Description()) + assert.Equal(t, "{timeouts}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.query.count": + assert.False(t, validatedMetrics["apachedruid.query.count"], "Found a duplicate in the metrics slice: apachedruid.query.count") + validatedMetrics["apachedruid.query.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of total queries.", ms.At(i).Description()) + assert.Equal(t, "{queries}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := 
ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.query.cpu.time": + assert.False(t, validatedMetrics["apachedruid.query.cpu.time"], "Found a duplicate in the metrics slice: apachedruid.query.cpu.time") + validatedMetrics["apachedruid.query.cpu.time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Microseconds of CPU time taken to complete a query.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "query_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("num_metrics") + assert.True(t, ok) + assert.EqualValues(t, "query_num_metrics-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("dimension") + assert.True(t, ok) + assert.EqualValues(t, "query_dimension-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("has_filters") + assert.True(t, ok) + assert.EqualValues(t, "query_has_filters-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("threshold") + assert.True(t, ok) + assert.EqualValues(t, 15, attrVal.Int()) + attrVal, ok = dp.Attributes().Get("num_complex_metrics") + assert.True(t, ok) + assert.EqualValues(t, 25, attrVal.Int()) + attrVal, ok = dp.Attributes().Get("type") + assert.True(t, ok) + assert.EqualValues(t, "query_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("remote_address") + assert.True(t, ok) + assert.EqualValues(t, "query_remote_address-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("id") + assert.True(t, ok) + assert.EqualValues(t, "query_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("context") + assert.True(t, ok) + assert.EqualValues(t, "query_context-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("num_dimensions") + assert.True(t, ok) + assert.EqualValues(t, "query_num_dimensions-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("interval") + assert.True(t, ok) + assert.EqualValues(t, "query_interval-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("duration") + assert.True(t, ok) + assert.EqualValues(t, "query_duration-val", attrVal.Str()) + case "apachedruid.query.failed.count": + assert.False(t, validatedMetrics["apachedruid.query.failed.count"], "Found a duplicate in the metrics slice: apachedruid.query.failed.count") + validatedMetrics["apachedruid.query.failed.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of failed queries.", ms.At(i).Description()) + assert.Equal(t, "{queries}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case 
"apachedruid.query.interrupted.count": + assert.False(t, validatedMetrics["apachedruid.query.interrupted.count"], "Found a duplicate in the metrics slice: apachedruid.query.interrupted.count") + validatedMetrics["apachedruid.query.interrupted.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of queries interrupted due to cancellation.", ms.At(i).Description()) + assert.Equal(t, "{queries}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.query.node.backpressure": + assert.False(t, validatedMetrics["apachedruid.query.node.backpressure"], "Found a duplicate in the metrics slice: apachedruid.query.node.backpressure") + validatedMetrics["apachedruid.query.node.backpressure"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Milliseconds that the channel to this process has spent suspended due to backpressure.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("status") + assert.True(t, ok) + assert.EqualValues(t, "query_status-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("server") + assert.True(t, ok) + assert.EqualValues(t, "query_server-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("id") + assert.True(t, ok) + assert.EqualValues(t, "query_id-val", attrVal.Str()) + case "apachedruid.query.node.bytes": + assert.False(t, validatedMetrics["apachedruid.query.node.bytes"], "Found a duplicate in the metrics slice: apachedruid.query.node.bytes") + validatedMetrics["apachedruid.query.node.bytes"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of bytes returned from querying individual historical/realtime processes.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("status") + assert.True(t, ok) + assert.EqualValues(t, "query_status-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("server") + assert.True(t, ok) + assert.EqualValues(t, "query_server-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("id") + assert.True(t, ok) + assert.EqualValues(t, "query_id-val", attrVal.Str()) + case "apachedruid.query.node.time": + assert.False(t, validatedMetrics["apachedruid.query.node.time"], "Found a duplicate in the metrics slice: apachedruid.query.node.time") + validatedMetrics["apachedruid.query.node.time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, 
ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Milliseconds taken to query individual historical/realtime processes.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("status") + assert.True(t, ok) + assert.EqualValues(t, "query_status-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("server") + assert.True(t, ok) + assert.EqualValues(t, "query_server-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("id") + assert.True(t, ok) + assert.EqualValues(t, "query_id-val", attrVal.Str()) + case "apachedruid.query.node.ttfb": + assert.False(t, validatedMetrics["apachedruid.query.node.ttfb"], "Found a duplicate in the metrics slice: apachedruid.query.node.ttfb") + validatedMetrics["apachedruid.query.node.ttfb"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Time to first byte. Milliseconds elapsed until Broker starts receiving the response from individual historical/realtime processes.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("status") + assert.True(t, ok) + assert.EqualValues(t, "query_status-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("server") + assert.True(t, ok) + assert.EqualValues(t, "query_server-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("id") + assert.True(t, ok) + assert.EqualValues(t, "query_id-val", attrVal.Str()) + case "apachedruid.query.priority": + assert.False(t, validatedMetrics["apachedruid.query.priority"], "Found a duplicate in the metrics slice: apachedruid.query.priority") + validatedMetrics["apachedruid.query.priority"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Assigned lane and priority, only if Laning strategy is enabled. 
Refer to [Laning strategies](https,//druid.apache.org/docs/latest/configuration#laning-strategies).", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("type") + assert.True(t, ok) + assert.EqualValues(t, "query_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "query_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("lane") + assert.True(t, ok) + assert.EqualValues(t, "query_lane-val", attrVal.Str()) + case "apachedruid.query.row_limit.exceeded.count": + assert.False(t, validatedMetrics["apachedruid.query.row_limit.exceeded.count"], "Found a duplicate in the metrics slice: apachedruid.query.row_limit.exceeded.count") + validatedMetrics["apachedruid.query.row_limit.exceeded.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of queries whose inlined subquery results exceeded the given row limit.", ms.At(i).Description()) + assert.Equal(t, "{queries}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.query.segment.time": + assert.False(t, validatedMetrics["apachedruid.query.segment.time"], "Found a duplicate in the metrics slice: apachedruid.query.segment.time") + validatedMetrics["apachedruid.query.segment.time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Milliseconds taken to query individual segment. 
Includes time to page in the segment from disk.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("status") + assert.True(t, ok) + assert.EqualValues(t, "query_status-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("segment") + assert.True(t, ok) + assert.EqualValues(t, "query_segment-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("id") + assert.True(t, ok) + assert.EqualValues(t, "query_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("vectorized") + assert.True(t, ok) + assert.EqualValues(t, "query_vectorized-val", attrVal.Str()) + case "apachedruid.query.segment_and_cache.time": + assert.False(t, validatedMetrics["apachedruid.query.segment_and_cache.time"], "Found a duplicate in the metrics slice: apachedruid.query.segment_and_cache.time") + validatedMetrics["apachedruid.query.segment_and_cache.time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Milliseconds taken to query individual segment or hit the cache (if it is enabled on the Historical process).", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("segment") + assert.True(t, ok) + assert.EqualValues(t, "query_segment-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("id") + assert.True(t, ok) + assert.EqualValues(t, "query_id-val", attrVal.Str()) + case "apachedruid.query.segments.count": + assert.False(t, validatedMetrics["apachedruid.query.segments.count"], "Found a duplicate in the metrics slice: apachedruid.query.segments.count") + validatedMetrics["apachedruid.query.segments.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "This metric is not enabled by default. See the `QueryMetrics` Interface for reference regarding enabling this metric. Number of segments that will be touched by the query. In the broker, it makes a plan to distribute the query to realtime tasks and historicals based on a snapshot of segment distribution state. If there are some segments moved after this snapshot is created, certain historicals and realtime tasks can report those segments as missing to the broker. The broker will resend the query to the new servers that serve those segments after move. 
In this case, those segments can be counted more than once in this metric.", ms.At(i).Description()) + assert.Equal(t, "{segments}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.query.success.count": + assert.False(t, validatedMetrics["apachedruid.query.success.count"], "Found a duplicate in the metrics slice: apachedruid.query.success.count") + validatedMetrics["apachedruid.query.success.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of queries successfully processed.", ms.At(i).Description()) + assert.Equal(t, "{queries}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.query.time": + assert.False(t, validatedMetrics["apachedruid.query.time"], "Found a duplicate in the metrics slice: apachedruid.query.time") + validatedMetrics["apachedruid.query.time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Milliseconds taken to complete a query.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "query_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("num_metrics") + assert.True(t, ok) + assert.EqualValues(t, "query_num_metrics-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("dimension") + assert.True(t, ok) + assert.EqualValues(t, "query_dimension-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("has_filters") + assert.True(t, ok) + assert.EqualValues(t, "query_has_filters-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("threshold") + assert.True(t, ok) + assert.EqualValues(t, 15, attrVal.Int()) + attrVal, ok = dp.Attributes().Get("num_complex_metrics") + assert.True(t, ok) + assert.EqualValues(t, 25, attrVal.Int()) + attrVal, ok = dp.Attributes().Get("type") + assert.True(t, ok) + assert.EqualValues(t, "query_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("remote_address") + assert.True(t, ok) + assert.EqualValues(t, "query_remote_address-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("id") + assert.True(t, ok) + assert.EqualValues(t, "query_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("context") + assert.True(t, ok) + assert.EqualValues(t, "query_context-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("num_dimensions") + assert.True(t, ok) + assert.EqualValues(t, "query_num_dimensions-val", 
attrVal.Str()) + attrVal, ok = dp.Attributes().Get("interval") + assert.True(t, ok) + assert.EqualValues(t, "query_interval-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("duration") + assert.True(t, ok) + assert.EqualValues(t, "query_duration-val", attrVal.Str()) + case "apachedruid.query.timeout.count": + assert.False(t, validatedMetrics["apachedruid.query.timeout.count"], "Found a duplicate in the metrics slice: apachedruid.query.timeout.count") + validatedMetrics["apachedruid.query.timeout.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of timed out queries.", ms.At(i).Description()) + assert.Equal(t, "{queries}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.query.wait.time": + assert.False(t, validatedMetrics["apachedruid.query.wait.time"], "Found a duplicate in the metrics slice: apachedruid.query.wait.time") + validatedMetrics["apachedruid.query.wait.time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Milliseconds spent waiting for a segment to be scanned.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("segment") + assert.True(t, ok) + assert.EqualValues(t, "query_segment-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("id") + assert.True(t, ok) + assert.EqualValues(t, "query_id-val", attrVal.Str()) + case "apachedruid.segment.added.bytes": + assert.False(t, validatedMetrics["apachedruid.segment.added.bytes"], "Found a duplicate in the metrics slice: apachedruid.segment.added.bytes") + validatedMetrics["apachedruid.segment.added.bytes"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Size in bytes of new segments created.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "segment_task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "segment_group_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "segment_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "segment_task_id-val", attrVal.Str()) + attrVal, ok = 
dp.Attributes().Get("interval") + assert.True(t, ok) + assert.EqualValues(t, "segment_interval-val", attrVal.Str()) + case "apachedruid.segment.assign_skipped.count": + assert.False(t, validatedMetrics["apachedruid.segment.assign_skipped.count"], "Found a duplicate in the metrics slice: apachedruid.segment.assign_skipped.count") + validatedMetrics["apachedruid.segment.assign_skipped.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of segments that could not be assigned to any server for loading. This can occur due to replication throttling, no available disk space, or a full load queue.", ms.At(i).Description()) + assert.Equal(t, "{segments}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("description") + assert.True(t, ok) + assert.EqualValues(t, "segment_description-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tier") + assert.True(t, ok) + assert.EqualValues(t, "segment_tier-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) + case "apachedruid.segment.assigned.count": + assert.False(t, validatedMetrics["apachedruid.segment.assigned.count"], "Found a duplicate in the metrics slice: apachedruid.segment.assigned.count") + validatedMetrics["apachedruid.segment.assigned.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of segments assigned to be loaded in the cluster.", ms.At(i).Description()) + assert.Equal(t, "{segments}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("tier") + assert.True(t, ok) + assert.EqualValues(t, "segment_tier-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) + case "apachedruid.segment.compacted.bytes": + assert.False(t, validatedMetrics["apachedruid.segment.compacted.bytes"], "Found a duplicate in the metrics slice: apachedruid.segment.compacted.bytes") + validatedMetrics["apachedruid.segment.compacted.bytes"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Total bytes of this datasource that are already compacted with the spec set in the auto compaction config.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), 
dp.IntValue()) + attrVal, ok := dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) + case "apachedruid.segment.compacted.count": + assert.False(t, validatedMetrics["apachedruid.segment.compacted.count"], "Found a duplicate in the metrics slice: apachedruid.segment.compacted.count") + validatedMetrics["apachedruid.segment.compacted.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Total number of segments of this datasource that are already compacted with the spec set in the auto compaction config.", ms.At(i).Description()) + assert.Equal(t, "{segments}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) + case "apachedruid.segment.count": + assert.False(t, validatedMetrics["apachedruid.segment.count"], "Found a duplicate in the metrics slice: apachedruid.segment.count") + validatedMetrics["apachedruid.segment.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of used segments belonging to a data source. Emitted only for data sources to which at least one used segment belongs.", ms.At(i).Description()) + assert.Equal(t, "{segments}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("priority") + assert.True(t, ok) + assert.EqualValues(t, "segment_priority-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tier") + assert.True(t, ok) + assert.EqualValues(t, "segment_tier-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) + case "apachedruid.segment.deleted.count": + assert.False(t, validatedMetrics["apachedruid.segment.deleted.count"], "Found a duplicate in the metrics slice: apachedruid.segment.deleted.count") + validatedMetrics["apachedruid.segment.deleted.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of segments marked as unused due to drop rules.", ms.At(i).Description()) + assert.Equal(t, "{segments}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) + case 
"apachedruid.segment.drop_queue.count": + assert.False(t, validatedMetrics["apachedruid.segment.drop_queue.count"], "Found a duplicate in the metrics slice: apachedruid.segment.drop_queue.count") + validatedMetrics["apachedruid.segment.drop_queue.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of segments to drop.", ms.At(i).Description()) + assert.Equal(t, "{segments}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("server") + assert.True(t, ok) + assert.EqualValues(t, "segment_server-val", attrVal.Str()) + case "apachedruid.segment.drop_skipped.count": + assert.False(t, validatedMetrics["apachedruid.segment.drop_skipped.count"], "Found a duplicate in the metrics slice: apachedruid.segment.drop_skipped.count") + validatedMetrics["apachedruid.segment.drop_skipped.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of segments that could not be dropped from any server.", ms.At(i).Description()) + assert.Equal(t, "{segments}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("description") + assert.True(t, ok) + assert.EqualValues(t, "segment_description-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tier") + assert.True(t, ok) + assert.EqualValues(t, "segment_tier-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) + case "apachedruid.segment.dropped.count": + assert.False(t, validatedMetrics["apachedruid.segment.dropped.count"], "Found a duplicate in the metrics slice: apachedruid.segment.dropped.count") + validatedMetrics["apachedruid.segment.dropped.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of segments chosen to be dropped from the cluster due to being over-replicated.", ms.At(i).Description()) + assert.Equal(t, "{segments}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("tier") + assert.True(t, ok) + assert.EqualValues(t, "segment_tier-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) + case "apachedruid.segment.load_queue.assigned": + assert.False(t, validatedMetrics["apachedruid.segment.load_queue.assigned"], "Found a duplicate in the metrics slice: apachedruid.segment.load_queue.assigned") + validatedMetrics["apachedruid.segment.load_queue.assigned"] = true + 
assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of segments assigned for load or drop to the load queue of a server.", ms.At(i).Description()) + assert.Equal(t, "{segments}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("server") + assert.True(t, ok) + assert.EqualValues(t, "segment_server-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) + case "apachedruid.segment.load_queue.cancelled": + assert.False(t, validatedMetrics["apachedruid.segment.load_queue.cancelled"], "Found a duplicate in the metrics slice: apachedruid.segment.load_queue.cancelled") + validatedMetrics["apachedruid.segment.load_queue.cancelled"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of segment assignments that were canceled before completion.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("server") + assert.True(t, ok) + assert.EqualValues(t, "segment_server-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) + case "apachedruid.segment.load_queue.count": + assert.False(t, validatedMetrics["apachedruid.segment.load_queue.count"], "Found a duplicate in the metrics slice: apachedruid.segment.load_queue.count") + validatedMetrics["apachedruid.segment.load_queue.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of segments to load.", ms.At(i).Description()) + assert.Equal(t, "{segments}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("server") + assert.True(t, ok) + assert.EqualValues(t, "segment_server-val", attrVal.Str()) + case "apachedruid.segment.load_queue.failed": + assert.False(t, validatedMetrics["apachedruid.segment.load_queue.failed"], "Found a duplicate in the metrics slice: apachedruid.segment.load_queue.failed") + validatedMetrics["apachedruid.segment.load_queue.failed"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of segment assignments that failed to complete.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) 
+ assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("server") + assert.True(t, ok) + assert.EqualValues(t, "segment_server-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) + case "apachedruid.segment.load_queue.size": + assert.False(t, validatedMetrics["apachedruid.segment.load_queue.size"], "Found a duplicate in the metrics slice: apachedruid.segment.load_queue.size") + validatedMetrics["apachedruid.segment.load_queue.size"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Size in bytes of segments to load.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("server") + assert.True(t, ok) + assert.EqualValues(t, "segment_server-val", attrVal.Str()) + case "apachedruid.segment.load_queue.success": + assert.False(t, validatedMetrics["apachedruid.segment.load_queue.success"], "Found a duplicate in the metrics slice: apachedruid.segment.load_queue.success") + validatedMetrics["apachedruid.segment.load_queue.success"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of segment assignments that completed successfully.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("server") + assert.True(t, ok) + assert.EqualValues(t, "segment_server-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) + case "apachedruid.segment.max": + assert.False(t, validatedMetrics["apachedruid.segment.max"], "Found a duplicate in the metrics slice: apachedruid.segment.max") + validatedMetrics["apachedruid.segment.max"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Maximum byte limit available for segments.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.segment.move_skipped.count": + assert.False(t, 
validatedMetrics["apachedruid.segment.move_skipped.count"], "Found a duplicate in the metrics slice: apachedruid.segment.move_skipped.count") + validatedMetrics["apachedruid.segment.move_skipped.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of segments that were chosen for balancing but could not be moved. This can occur when segments are already optimally placed.", ms.At(i).Description()) + assert.Equal(t, "{segments}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("description") + assert.True(t, ok) + assert.EqualValues(t, "segment_description-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tier") + assert.True(t, ok) + assert.EqualValues(t, "segment_tier-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) + case "apachedruid.segment.moved.bytes": + assert.False(t, validatedMetrics["apachedruid.segment.moved.bytes"], "Found a duplicate in the metrics slice: apachedruid.segment.moved.bytes") + validatedMetrics["apachedruid.segment.moved.bytes"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Size in bytes of segments moved/archived via the Move Task.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "segment_task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "segment_group_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "segment_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "segment_task_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("interval") + assert.True(t, ok) + assert.EqualValues(t, "segment_interval-val", attrVal.Str()) + case "apachedruid.segment.moved.count": + assert.False(t, validatedMetrics["apachedruid.segment.moved.count"], "Found a duplicate in the metrics slice: apachedruid.segment.moved.count") + validatedMetrics["apachedruid.segment.moved.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of segments moved in the cluster.", ms.At(i).Description()) + assert.Equal(t, "{segments}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, 
pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("tier") + assert.True(t, ok) + assert.EqualValues(t, "segment_tier-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) + case "apachedruid.segment.nuked.bytes": + assert.False(t, validatedMetrics["apachedruid.segment.nuked.bytes"], "Found a duplicate in the metrics slice: apachedruid.segment.nuked.bytes") + validatedMetrics["apachedruid.segment.nuked.bytes"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Size in bytes of segments deleted via the Kill Task.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "segment_task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "segment_group_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "segment_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "segment_task_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("interval") + assert.True(t, ok) + assert.EqualValues(t, "segment_interval-val", attrVal.Str()) + case "apachedruid.segment.over_shadowed.count": + assert.False(t, validatedMetrics["apachedruid.segment.over_shadowed.count"], "Found a duplicate in the metrics slice: apachedruid.segment.over_shadowed.count") + validatedMetrics["apachedruid.segment.over_shadowed.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of segments marked as unused due to being overshadowed.", ms.At(i).Description()) + assert.Equal(t, "{segments}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.segment.pending_delete": + assert.False(t, validatedMetrics["apachedruid.segment.pending_delete"], "Found a duplicate in the metrics slice: apachedruid.segment.pending_delete") + validatedMetrics["apachedruid.segment.pending_delete"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "On-disk size in bytes of segments that are waiting to be cleared out.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.segment.row_count.avg": + assert.False(t, 
validatedMetrics["apachedruid.segment.row_count.avg"], "Found a duplicate in the metrics slice: apachedruid.segment.row_count.avg") + validatedMetrics["apachedruid.segment.row_count.avg"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The average number of rows per segment on a historical. `SegmentStatsMonitor` must be enabled.", ms.At(i).Description()) + assert.Equal(t, "{rows}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("priority") + assert.True(t, ok) + assert.EqualValues(t, "segment_priority-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tier") + assert.True(t, ok) + assert.EqualValues(t, "segment_tier-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) + case "apachedruid.segment.row_count.range.count": + assert.False(t, validatedMetrics["apachedruid.segment.row_count.range.count"], "Found a duplicate in the metrics slice: apachedruid.segment.row_count.range.count") + validatedMetrics["apachedruid.segment.row_count.range.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The number of segments in a bucket. `SegmentStatsMonitor` must be enabled.", ms.At(i).Description()) + assert.Equal(t, "{segments}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("priority") + assert.True(t, ok) + assert.EqualValues(t, "segment_priority-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tier") + assert.True(t, ok) + assert.EqualValues(t, "segment_tier-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("range") + assert.True(t, ok) + assert.EqualValues(t, "segment_range-val", attrVal.Str()) + case "apachedruid.segment.scan.active": + assert.False(t, validatedMetrics["apachedruid.segment.scan.active"], "Found a duplicate in the metrics slice: apachedruid.segment.scan.active") + validatedMetrics["apachedruid.segment.scan.active"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of segments currently scanned. 
This metric also indicates how many threads from `druid.processing.numThreads` are currently being used.", ms.At(i).Description()) + assert.Equal(t, "{segments}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.segment.scan.pending": + assert.False(t, validatedMetrics["apachedruid.segment.scan.pending"], "Found a duplicate in the metrics slice: apachedruid.segment.scan.pending") + validatedMetrics["apachedruid.segment.scan.pending"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of segments in queue waiting to be scanned.", ms.At(i).Description()) + assert.Equal(t, "{segments}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.segment.size": + assert.False(t, validatedMetrics["apachedruid.segment.size"], "Found a duplicate in the metrics slice: apachedruid.segment.size") + validatedMetrics["apachedruid.segment.size"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total size of used segments in a data source. Emitted only for data sources to which at least one used segment belongs.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) + case "apachedruid.segment.skip_compact.bytes": + assert.False(t, validatedMetrics["apachedruid.segment.skip_compact.bytes"], "Found a duplicate in the metrics slice: apachedruid.segment.skip_compact.bytes") + validatedMetrics["apachedruid.segment.skip_compact.bytes"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total bytes of this datasource that are skipped (not eligible for auto compaction) by the auto compaction.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) + case "apachedruid.segment.skip_compact.count": + assert.False(t, validatedMetrics["apachedruid.segment.skip_compact.count"], "Found a duplicate in the metrics slice: apachedruid.segment.skip_compact.count") + validatedMetrics["apachedruid.segment.skip_compact.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total number of segments of this datasource that are skipped (not 
eligible for auto compaction) by the auto compaction.", ms.At(i).Description()) + assert.Equal(t, "{segments}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) + case "apachedruid.segment.unavailable.count": + assert.False(t, validatedMetrics["apachedruid.segment.unavailable.count"], "Found a duplicate in the metrics slice: apachedruid.segment.unavailable.count") + validatedMetrics["apachedruid.segment.unavailable.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of unique segments left to load until all used segments are available for queries.", ms.At(i).Description()) + assert.Equal(t, "{segments}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) + case "apachedruid.segment.under_replicated.count": + assert.False(t, validatedMetrics["apachedruid.segment.under_replicated.count"], "Found a duplicate in the metrics slice: apachedruid.segment.under_replicated.count") + validatedMetrics["apachedruid.segment.under_replicated.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of segments, including replicas, left to load until all used segments are available for queries.", ms.At(i).Description()) + assert.Equal(t, "{segments}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("tier") + assert.True(t, ok) + assert.EqualValues(t, "segment_tier-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) + case "apachedruid.segment.unneeded.count": + assert.False(t, validatedMetrics["apachedruid.segment.unneeded.count"], "Found a duplicate in the metrics slice: apachedruid.segment.unneeded.count") + validatedMetrics["apachedruid.segment.unneeded.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of segments dropped due to being marked as unused.", ms.At(i).Description()) + assert.Equal(t, "{segments}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("tier") + assert.True(t, ok) + assert.EqualValues(t, "segment_tier-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + 
assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) + case "apachedruid.segment.used": + assert.False(t, validatedMetrics["apachedruid.segment.used"], "Found a duplicate in the metrics slice: apachedruid.segment.used") + validatedMetrics["apachedruid.segment.used"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Bytes used for served segments.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("priority") + assert.True(t, ok) + assert.EqualValues(t, "segment_priority-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tier") + assert.True(t, ok) + assert.EqualValues(t, "segment_tier-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) + case "apachedruid.segment.used_percent": + assert.False(t, validatedMetrics["apachedruid.segment.used_percent"], "Found a duplicate in the metrics slice: apachedruid.segment.used_percent") + validatedMetrics["apachedruid.segment.used_percent"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Percentage of space used by served segments.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("priority") + assert.True(t, ok) + assert.EqualValues(t, "segment_priority-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tier") + assert.True(t, ok) + assert.EqualValues(t, "segment_tier-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) + case "apachedruid.segment.wait_compact.bytes": + assert.False(t, validatedMetrics["apachedruid.segment.wait_compact.bytes"], "Found a duplicate in the metrics slice: apachedruid.segment.wait_compact.bytes") + validatedMetrics["apachedruid.segment.wait_compact.bytes"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total bytes of this datasource waiting to be compacted by the auto compaction (only consider intervals/segments that are eligible for auto compaction).", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) + case "apachedruid.segment.wait_compact.count": + assert.False(t, validatedMetrics["apachedruid.segment.wait_compact.count"], "Found a duplicate in the metrics slice: apachedruid.segment.wait_compact.count") + 
validatedMetrics["apachedruid.segment.wait_compact.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total number of segments of this datasource waiting to be compacted by the auto compaction (only consider intervals/segments that are eligible for auto compaction).", ms.At(i).Description()) + assert.Equal(t, "{segments}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) + case "apachedruid.serverview.init.time": + assert.False(t, validatedMetrics["apachedruid.serverview.init.time"], "Found a duplicate in the metrics slice: apachedruid.serverview.init.time") + validatedMetrics["apachedruid.serverview.init.time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Time taken to initialize the broker server view. Useful to detect if brokers are taking too long to start.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.serverview.sync.healthy": + assert.False(t, validatedMetrics["apachedruid.serverview.sync.healthy"], "Found a duplicate in the metrics slice: apachedruid.serverview.sync.healthy") + validatedMetrics["apachedruid.serverview.sync.healthy"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Sync status of the Broker with a segment-loading server such as a Historical or Peon. Emitted only when [HTTP-based server view](https,//druid.apache.org/docs/latest/configuration#segment-management) is enabled. This metric can be used in conjunction with `serverview/sync/unstableTime` to debug slow startup of Brokers.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("tier") + assert.True(t, ok) + assert.EqualValues(t, "serverview_tier-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("server") + assert.True(t, ok) + assert.EqualValues(t, "serverview_server-val", attrVal.Str()) + case "apachedruid.serverview.sync.unstable_time": + assert.False(t, validatedMetrics["apachedruid.serverview.sync.unstable_time"], "Found a duplicate in the metrics slice: apachedruid.serverview.sync.unstable_time") + validatedMetrics["apachedruid.serverview.sync.unstable_time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Time in milliseconds for which the Broker has been failing to sync with a segment-loading server. 
Emitted only when [HTTP-based server view](https,//druid.apache.org/docs/latest/configuration#segment-management) is enabled.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("tier") + assert.True(t, ok) + assert.EqualValues(t, "serverview_tier-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("server") + assert.True(t, ok) + assert.EqualValues(t, "serverview_server-val", attrVal.Str()) + case "apachedruid.sql_query.bytes": + assert.False(t, validatedMetrics["apachedruid.sql_query.bytes"], "Found a duplicate in the metrics slice: apachedruid.sql_query.bytes") + validatedMetrics["apachedruid.sql_query.bytes"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of bytes returned in the SQL query response.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "sqlQuery_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("native_query_ids") + assert.True(t, ok) + assert.EqualValues(t, "sqlQuery_native_query_ids-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("engine") + assert.True(t, ok) + assert.EqualValues(t, "sqlQuery_engine-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("remote_address") + assert.True(t, ok) + assert.EqualValues(t, "sqlQuery_remote_address-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("id") + assert.True(t, ok) + assert.EqualValues(t, "sqlQuery_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("success") + assert.True(t, ok) + assert.EqualValues(t, "sqlQuery_success-val", attrVal.Str()) + case "apachedruid.sql_query.planning_time_ms": + assert.False(t, validatedMetrics["apachedruid.sql_query.planning_time_ms"], "Found a duplicate in the metrics slice: apachedruid.sql_query.planning_time_ms") + validatedMetrics["apachedruid.sql_query.planning_time_ms"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Milliseconds taken to plan a SQL to native query.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "sqlQuery_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("native_query_ids") + assert.True(t, ok) + assert.EqualValues(t, "sqlQuery_native_query_ids-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("engine") + assert.True(t, ok) + assert.EqualValues(t, "sqlQuery_engine-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("remote_address") + assert.True(t, ok) + assert.EqualValues(t, 
"sqlQuery_remote_address-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("id") + assert.True(t, ok) + assert.EqualValues(t, "sqlQuery_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("success") + assert.True(t, ok) + assert.EqualValues(t, "sqlQuery_success-val", attrVal.Str()) + case "apachedruid.sql_query.time": + assert.False(t, validatedMetrics["apachedruid.sql_query.time"], "Found a duplicate in the metrics slice: apachedruid.sql_query.time") + validatedMetrics["apachedruid.sql_query.time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Milliseconds taken to complete a SQL query.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "sqlQuery_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("native_query_ids") + assert.True(t, ok) + assert.EqualValues(t, "sqlQuery_native_query_ids-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("engine") + assert.True(t, ok) + assert.EqualValues(t, "sqlQuery_engine-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("remote_address") + assert.True(t, ok) + assert.EqualValues(t, "sqlQuery_remote_address-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("id") + assert.True(t, ok) + assert.EqualValues(t, "sqlQuery_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("success") + assert.True(t, ok) + assert.EqualValues(t, "sqlQuery_success-val", attrVal.Str()) + case "apachedruid.subquery.byte_limit.count": + assert.False(t, validatedMetrics["apachedruid.subquery.byte_limit.count"], "Found a duplicate in the metrics slice: apachedruid.subquery.byte_limit.count") + validatedMetrics["apachedruid.subquery.byte_limit.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of subqueries whose results are materialized as frames (Druid's internal byte representation of rows).", ms.At(i).Description()) + assert.Equal(t, "{subqueries}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.subquery.fallback.count": + assert.False(t, validatedMetrics["apachedruid.subquery.fallback.count"], "Found a duplicate in the metrics slice: apachedruid.subquery.fallback.count") + validatedMetrics["apachedruid.subquery.fallback.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of subqueries which cannot be materialized as frames.", ms.At(i).Description()) + assert.Equal(t, "{subqueries}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + 
assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.subquery.fallback.insufficient_type.count": + assert.False(t, validatedMetrics["apachedruid.subquery.fallback.insufficient_type.count"], "Found a duplicate in the metrics slice: apachedruid.subquery.fallback.insufficient_type.count") + validatedMetrics["apachedruid.subquery.fallback.insufficient_type.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of subqueries which cannot be materialized as frames due to insufficient type information in the row signature.", ms.At(i).Description()) + assert.Equal(t, "{subqueries}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.subquery.fallback.unknown_reason.count": + assert.False(t, validatedMetrics["apachedruid.subquery.fallback.unknown_reason.count"], "Found a duplicate in the metrics slice: apachedruid.subquery.fallback.unknown_reason.count") + validatedMetrics["apachedruid.subquery.fallback.unknown_reason.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of subqueries which cannot be materialized as frames due other reasons.", ms.At(i).Description()) + assert.Equal(t, "{subqueries}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.subquery.row_limit.count": + assert.False(t, validatedMetrics["apachedruid.subquery.row_limit.count"], "Found a duplicate in the metrics slice: apachedruid.subquery.row_limit.count") + validatedMetrics["apachedruid.subquery.row_limit.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of subqueries whose results are materialized as rows (Java objects on heap).", ms.At(i).Description()) + assert.Equal(t, "{subqueries}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.sys.cpu": + assert.False(t, validatedMetrics["apachedruid.sys.cpu"], "Found a duplicate in the metrics slice: apachedruid.sys.cpu") + validatedMetrics["apachedruid.sys.cpu"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "CPU 
used.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("cpu_time") + assert.True(t, ok) + assert.EqualValues(t, "sys_cpu_time-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("cpu_name") + assert.True(t, ok) + assert.EqualValues(t, "sys_cpu_name-val", attrVal.Str()) + case "apachedruid.sys.disk.queue": + assert.False(t, validatedMetrics["apachedruid.sys.disk.queue"], "Found a duplicate in the metrics slice: apachedruid.sys.disk.queue") + validatedMetrics["apachedruid.sys.disk.queue"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Disk queue length. Measures number of requests waiting to be processed by disk.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("disk_name") + assert.True(t, ok) + assert.EqualValues(t, "sys_disk_name-val", attrVal.Str()) + case "apachedruid.sys.disk.read.count": + assert.False(t, validatedMetrics["apachedruid.sys.disk.read.count"], "Found a duplicate in the metrics slice: apachedruid.sys.disk.read.count") + validatedMetrics["apachedruid.sys.disk.read.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Reads from disk.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("disk_name") + assert.True(t, ok) + assert.EqualValues(t, "sys_disk_name-val", attrVal.Str()) + case "apachedruid.sys.disk.read.size": + assert.False(t, validatedMetrics["apachedruid.sys.disk.read.size"], "Found a duplicate in the metrics slice: apachedruid.sys.disk.read.size") + validatedMetrics["apachedruid.sys.disk.read.size"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Bytes read from disk. 
One indicator of the amount of paging occurring for segments.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("disk_name") + assert.True(t, ok) + assert.EqualValues(t, "sys_disk_name-val", attrVal.Str()) + case "apachedruid.sys.disk.transfer_time": + assert.False(t, validatedMetrics["apachedruid.sys.disk.transfer_time"], "Found a duplicate in the metrics slice: apachedruid.sys.disk.transfer_time") + validatedMetrics["apachedruid.sys.disk.transfer_time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Transfer time to read from or write to disk.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("disk_name") + assert.True(t, ok) + assert.EqualValues(t, "sys_disk_name-val", attrVal.Str()) + case "apachedruid.sys.disk.write.count": + assert.False(t, validatedMetrics["apachedruid.sys.disk.write.count"], "Found a duplicate in the metrics slice: apachedruid.sys.disk.write.count") + validatedMetrics["apachedruid.sys.disk.write.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Writes to disk.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("disk_name") + assert.True(t, ok) + assert.EqualValues(t, "sys_disk_name-val", attrVal.Str()) + case "apachedruid.sys.disk.write.size": + assert.False(t, validatedMetrics["apachedruid.sys.disk.write.size"], "Found a duplicate in the metrics slice: apachedruid.sys.disk.write.size") + validatedMetrics["apachedruid.sys.disk.write.size"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Bytes written to disk. 
One indicator of the amount of paging occurring for segments.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("disk_name") + assert.True(t, ok) + assert.EqualValues(t, "sys_disk_name-val", attrVal.Str()) + case "apachedruid.sys.fs.files.count": + assert.False(t, validatedMetrics["apachedruid.sys.fs.files.count"], "Found a duplicate in the metrics slice: apachedruid.sys.fs.files.count") + validatedMetrics["apachedruid.sys.fs.files.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Filesystem total IO nodes.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("fs_dir_name") + assert.True(t, ok) + assert.EqualValues(t, "sys_fs_dir_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("fs_dev_name") + assert.True(t, ok) + assert.EqualValues(t, "sys_fs_dev_name-val", attrVal.Str()) + case "apachedruid.sys.fs.files.free": + assert.False(t, validatedMetrics["apachedruid.sys.fs.files.free"], "Found a duplicate in the metrics slice: apachedruid.sys.fs.files.free") + validatedMetrics["apachedruid.sys.fs.files.free"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Filesystem free IO nodes.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("fs_dir_name") + assert.True(t, ok) + assert.EqualValues(t, "sys_fs_dir_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("fs_dev_name") + assert.True(t, ok) + assert.EqualValues(t, "sys_fs_dev_name-val", attrVal.Str()) + case "apachedruid.sys.fs.max": + assert.False(t, validatedMetrics["apachedruid.sys.fs.max"], "Found a duplicate in the metrics slice: apachedruid.sys.fs.max") + validatedMetrics["apachedruid.sys.fs.max"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Filesystem bytes max.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("fs_dir_name") + assert.True(t, ok) + assert.EqualValues(t, "sys_fs_dir_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("fs_dev_name") + assert.True(t, ok) + assert.EqualValues(t, "sys_fs_dev_name-val", attrVal.Str()) + case "apachedruid.sys.fs.used": + assert.False(t, validatedMetrics["apachedruid.sys.fs.used"], "Found a duplicate in the metrics 
slice: apachedruid.sys.fs.used") + validatedMetrics["apachedruid.sys.fs.used"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Filesystem bytes used.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("fs_dir_name") + assert.True(t, ok) + assert.EqualValues(t, "sys_fs_dir_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("fs_dev_name") + assert.True(t, ok) + assert.EqualValues(t, "sys_fs_dev_name-val", attrVal.Str()) + case "apachedruid.sys.la.1": + assert.False(t, validatedMetrics["apachedruid.sys.la.1"], "Found a duplicate in the metrics slice: apachedruid.sys.la.1") + validatedMetrics["apachedruid.sys.la.1"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "System CPU load averages over past `i` minutes, where `i={1,5,15}`.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.sys.la.15": + assert.False(t, validatedMetrics["apachedruid.sys.la.15"], "Found a duplicate in the metrics slice: apachedruid.sys.la.15") + validatedMetrics["apachedruid.sys.la.15"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "System CPU load averages over past `i` minutes, where `i={1,5,15}`.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.sys.la.5": + assert.False(t, validatedMetrics["apachedruid.sys.la.5"], "Found a duplicate in the metrics slice: apachedruid.sys.la.5") + validatedMetrics["apachedruid.sys.la.5"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "System CPU load averages over past `i` minutes, where `i={1,5,15}`.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.sys.mem.free": + assert.False(t, validatedMetrics["apachedruid.sys.mem.free"], "Found a duplicate in the metrics slice: apachedruid.sys.mem.free") + validatedMetrics["apachedruid.sys.mem.free"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Memory free.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + 
assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.sys.mem.max": + assert.False(t, validatedMetrics["apachedruid.sys.mem.max"], "Found a duplicate in the metrics slice: apachedruid.sys.mem.max") + validatedMetrics["apachedruid.sys.mem.max"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Memory max.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.sys.mem.used": + assert.False(t, validatedMetrics["apachedruid.sys.mem.used"], "Found a duplicate in the metrics slice: apachedruid.sys.mem.used") + validatedMetrics["apachedruid.sys.mem.used"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Memory used.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.sys.net.read.dropped": + assert.False(t, validatedMetrics["apachedruid.sys.net.read.dropped"], "Found a duplicate in the metrics slice: apachedruid.sys.net.read.dropped") + validatedMetrics["apachedruid.sys.net.read.dropped"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total packets dropped coming from network.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("net_hwaddr") + assert.True(t, ok) + assert.EqualValues(t, "sys_net_hwaddr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("net_name") + assert.True(t, ok) + assert.EqualValues(t, "sys_net_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("net_address") + assert.True(t, ok) + assert.EqualValues(t, "sys_net_address-val", attrVal.Str()) + case "apachedruid.sys.net.read.errors": + assert.False(t, validatedMetrics["apachedruid.sys.net.read.errors"], "Found a duplicate in the metrics slice: apachedruid.sys.net.read.errors") + validatedMetrics["apachedruid.sys.net.read.errors"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total network read errors.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("net_hwaddr") + assert.True(t, ok) + assert.EqualValues(t, "sys_net_hwaddr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("net_name") + assert.True(t, ok) + assert.EqualValues(t, 
"sys_net_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("net_address") + assert.True(t, ok) + assert.EqualValues(t, "sys_net_address-val", attrVal.Str()) + case "apachedruid.sys.net.read.packets": + assert.False(t, validatedMetrics["apachedruid.sys.net.read.packets"], "Found a duplicate in the metrics slice: apachedruid.sys.net.read.packets") + validatedMetrics["apachedruid.sys.net.read.packets"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total packets read from the network.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("net_hwaddr") + assert.True(t, ok) + assert.EqualValues(t, "sys_net_hwaddr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("net_name") + assert.True(t, ok) + assert.EqualValues(t, "sys_net_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("net_address") + assert.True(t, ok) + assert.EqualValues(t, "sys_net_address-val", attrVal.Str()) + case "apachedruid.sys.net.read.size": + assert.False(t, validatedMetrics["apachedruid.sys.net.read.size"], "Found a duplicate in the metrics slice: apachedruid.sys.net.read.size") + validatedMetrics["apachedruid.sys.net.read.size"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Bytes read from the network.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("net_hwaddr") + assert.True(t, ok) + assert.EqualValues(t, "sys_net_hwaddr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("net_name") + assert.True(t, ok) + assert.EqualValues(t, "sys_net_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("net_address") + assert.True(t, ok) + assert.EqualValues(t, "sys_net_address-val", attrVal.Str()) + case "apachedruid.sys.net.write.collisions": + assert.False(t, validatedMetrics["apachedruid.sys.net.write.collisions"], "Found a duplicate in the metrics slice: apachedruid.sys.net.write.collisions") + validatedMetrics["apachedruid.sys.net.write.collisions"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total network write collisions.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("net_hwaddr") + assert.True(t, ok) + assert.EqualValues(t, "sys_net_hwaddr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("net_name") + assert.True(t, ok) + assert.EqualValues(t, "sys_net_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("net_address") + assert.True(t, ok) + assert.EqualValues(t, "sys_net_address-val", attrVal.Str()) + 
case "apachedruid.sys.net.write.errors": + assert.False(t, validatedMetrics["apachedruid.sys.net.write.errors"], "Found a duplicate in the metrics slice: apachedruid.sys.net.write.errors") + validatedMetrics["apachedruid.sys.net.write.errors"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total network write errors.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("net_hwaddr") + assert.True(t, ok) + assert.EqualValues(t, "sys_net_hwaddr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("net_name") + assert.True(t, ok) + assert.EqualValues(t, "sys_net_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("net_address") + assert.True(t, ok) + assert.EqualValues(t, "sys_net_address-val", attrVal.Str()) + case "apachedruid.sys.net.write.packets": + assert.False(t, validatedMetrics["apachedruid.sys.net.write.packets"], "Found a duplicate in the metrics slice: apachedruid.sys.net.write.packets") + validatedMetrics["apachedruid.sys.net.write.packets"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total packets written to the network.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("net_hwaddr") + assert.True(t, ok) + assert.EqualValues(t, "sys_net_hwaddr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("net_name") + assert.True(t, ok) + assert.EqualValues(t, "sys_net_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("net_address") + assert.True(t, ok) + assert.EqualValues(t, "sys_net_address-val", attrVal.Str()) + case "apachedruid.sys.net.write.size": + assert.False(t, validatedMetrics["apachedruid.sys.net.write.size"], "Found a duplicate in the metrics slice: apachedruid.sys.net.write.size") + validatedMetrics["apachedruid.sys.net.write.size"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Bytes written to the network.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("net_hwaddr") + assert.True(t, ok) + assert.EqualValues(t, "sys_net_hwaddr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("net_name") + assert.True(t, ok) + assert.EqualValues(t, "sys_net_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("net_address") + assert.True(t, ok) + assert.EqualValues(t, "sys_net_address-val", attrVal.Str()) + case "apachedruid.sys.storage.used": + assert.False(t, validatedMetrics["apachedruid.sys.storage.used"], "Found a duplicate in the metrics slice: apachedruid.sys.storage.used") + 
validatedMetrics["apachedruid.sys.storage.used"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Disk space used.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("fs_dir_name") + assert.True(t, ok) + assert.EqualValues(t, "sys_fs_dir_name-val", attrVal.Str()) + case "apachedruid.sys.swap.free": + assert.False(t, validatedMetrics["apachedruid.sys.swap.free"], "Found a duplicate in the metrics slice: apachedruid.sys.swap.free") + validatedMetrics["apachedruid.sys.swap.free"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Free swap.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.sys.swap.max": + assert.False(t, validatedMetrics["apachedruid.sys.swap.max"], "Found a duplicate in the metrics slice: apachedruid.sys.swap.max") + validatedMetrics["apachedruid.sys.swap.max"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Max swap.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.sys.swap.page_in": + assert.False(t, validatedMetrics["apachedruid.sys.swap.page_in"], "Found a duplicate in the metrics slice: apachedruid.sys.swap.page_in") + validatedMetrics["apachedruid.sys.swap.page_in"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Paged in swap.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.sys.swap.page_out": + assert.False(t, validatedMetrics["apachedruid.sys.swap.page_out"], "Found a duplicate in the metrics slice: apachedruid.sys.swap.page_out") + validatedMetrics["apachedruid.sys.swap.page_out"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Paged out swap.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.sys.tcpv4.active_opens": + assert.False(t, validatedMetrics["apachedruid.sys.tcpv4.active_opens"], "Found a duplicate in 
the metrics slice: apachedruid.sys.tcpv4.active_opens") + validatedMetrics["apachedruid.sys.tcpv4.active_opens"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total TCP active open connections.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.sys.tcpv4.attempt_fails": + assert.False(t, validatedMetrics["apachedruid.sys.tcpv4.attempt_fails"], "Found a duplicate in the metrics slice: apachedruid.sys.tcpv4.attempt_fails") + validatedMetrics["apachedruid.sys.tcpv4.attempt_fails"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total TCP active connection failures.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.sys.tcpv4.estab_resets": + assert.False(t, validatedMetrics["apachedruid.sys.tcpv4.estab_resets"], "Found a duplicate in the metrics slice: apachedruid.sys.tcpv4.estab_resets") + validatedMetrics["apachedruid.sys.tcpv4.estab_resets"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total TCP connection resets.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.sys.tcpv4.in.errs": + assert.False(t, validatedMetrics["apachedruid.sys.tcpv4.in.errs"], "Found a duplicate in the metrics slice: apachedruid.sys.tcpv4.in.errs") + validatedMetrics["apachedruid.sys.tcpv4.in.errs"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Errors while reading segments.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.sys.tcpv4.in.segs": + assert.False(t, validatedMetrics["apachedruid.sys.tcpv4.in.segs"], "Found a duplicate in the metrics slice: apachedruid.sys.tcpv4.in.segs") + validatedMetrics["apachedruid.sys.tcpv4.in.segs"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total segments received in connection.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case 
"apachedruid.sys.tcpv4.out.rsts": + assert.False(t, validatedMetrics["apachedruid.sys.tcpv4.out.rsts"], "Found a duplicate in the metrics slice: apachedruid.sys.tcpv4.out.rsts") + validatedMetrics["apachedruid.sys.tcpv4.out.rsts"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total `out reset` packets sent to reset the connection.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.sys.tcpv4.out.segs": + assert.False(t, validatedMetrics["apachedruid.sys.tcpv4.out.segs"], "Found a duplicate in the metrics slice: apachedruid.sys.tcpv4.out.segs") + validatedMetrics["apachedruid.sys.tcpv4.out.segs"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total segments sent.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.sys.tcpv4.passive_opens": + assert.False(t, validatedMetrics["apachedruid.sys.tcpv4.passive_opens"], "Found a duplicate in the metrics slice: apachedruid.sys.tcpv4.passive_opens") + validatedMetrics["apachedruid.sys.tcpv4.passive_opens"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total TCP passive open connections.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.sys.tcpv4.retrans.segs": + assert.False(t, validatedMetrics["apachedruid.sys.tcpv4.retrans.segs"], "Found a duplicate in the metrics slice: apachedruid.sys.tcpv4.retrans.segs") + validatedMetrics["apachedruid.sys.tcpv4.retrans.segs"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total segments re-transmitted.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.sys.uptime": + assert.False(t, validatedMetrics["apachedruid.sys.uptime"], "Found a duplicate in the metrics slice: apachedruid.sys.uptime") + validatedMetrics["apachedruid.sys.uptime"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total system uptime.", ms.At(i).Description()) + assert.Equal(t, "s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, 
dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.task.action.batch.attempts": + assert.False(t, validatedMetrics["apachedruid.task.action.batch.attempts"], "Found a duplicate in the metrics slice: apachedruid.task.action.batch.attempts") + validatedMetrics["apachedruid.task.action.batch.attempts"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of execution attempts for a single batch of task actions. Currently only being emitted for [batched `segmentAllocate` actions](https,//druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions).", ms.At(i).Description()) + assert.Equal(t, "{attempts}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("interval") + assert.True(t, ok) + assert.EqualValues(t, "task_interval-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "task_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_action_type") + assert.True(t, ok) + assert.EqualValues(t, "task_action_type-val", attrVal.Str()) + case "apachedruid.task.action.batch.queue_time": + assert.False(t, validatedMetrics["apachedruid.task.action.batch.queue_time"], "Found a duplicate in the metrics slice: apachedruid.task.action.batch.queue_time") + validatedMetrics["apachedruid.task.action.batch.queue_time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Milliseconds spent by a batch of task actions in queue. Currently only being emitted for [batched `segmentAllocate` actions](https,//druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions).", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("interval") + assert.True(t, ok) + assert.EqualValues(t, "task_interval-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "task_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_action_type") + assert.True(t, ok) + assert.EqualValues(t, "task_action_type-val", attrVal.Str()) + case "apachedruid.task.action.batch.run_time": + assert.False(t, validatedMetrics["apachedruid.task.action.batch.run_time"], "Found a duplicate in the metrics slice: apachedruid.task.action.batch.run_time") + validatedMetrics["apachedruid.task.action.batch.run_time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Milliseconds taken to execute a batch of task actions. 
Currently only being emitted for [batched `segmentAllocate` actions](https,//druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions).", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("interval") + assert.True(t, ok) + assert.EqualValues(t, "task_interval-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "task_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_action_type") + assert.True(t, ok) + assert.EqualValues(t, "task_action_type-val", attrVal.Str()) + case "apachedruid.task.action.batch.size": + assert.False(t, validatedMetrics["apachedruid.task.action.batch.size"], "Found a duplicate in the metrics slice: apachedruid.task.action.batch.size") + validatedMetrics["apachedruid.task.action.batch.size"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of task actions in a batch that was executed during the emission period. Currently only being emitted for [batched `segmentAllocate` actions](https,//druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions).", ms.At(i).Description()) + assert.Equal(t, "{actions}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("interval") + assert.True(t, ok) + assert.EqualValues(t, "task_interval-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "task_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_action_type") + assert.True(t, ok) + assert.EqualValues(t, "task_action_type-val", attrVal.Str()) + case "apachedruid.task.action.failed.count": + assert.False(t, validatedMetrics["apachedruid.task.action.failed.count"], "Found a duplicate in the metrics slice: apachedruid.task.action.failed.count") + validatedMetrics["apachedruid.task.action.failed.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of task actions that failed during the emission period. 
Currently only being emitted for [batched `segmentAllocate` actions](https,//druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions).", ms.At(i).Description()) + assert.Equal(t, "{actions}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "task_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_action_type") + assert.True(t, ok) + assert.EqualValues(t, "task_action_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "task_group_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "task_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "task_id-val", attrVal.Str()) + case "apachedruid.task.action.log.time": + assert.False(t, validatedMetrics["apachedruid.task.action.log.time"], "Found a duplicate in the metrics slice: apachedruid.task.action.log.time") + validatedMetrics["apachedruid.task.action.log.time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Milliseconds taken to log a task action to the audit log.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "task_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_action_type") + assert.True(t, ok) + assert.EqualValues(t, "task_action_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "task_group_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "task_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "task_id-val", attrVal.Str()) + case "apachedruid.task.action.run.time": + assert.False(t, validatedMetrics["apachedruid.task.action.run.time"], "Found a duplicate in the metrics slice: apachedruid.task.action.run.time") + validatedMetrics["apachedruid.task.action.run.time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Milliseconds taken to execute a task action.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := 
dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "task_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_action_type") + assert.True(t, ok) + assert.EqualValues(t, "task_action_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "task_group_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "task_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "task_id-val", attrVal.Str()) + case "apachedruid.task.action.success.count": + assert.False(t, validatedMetrics["apachedruid.task.action.success.count"], "Found a duplicate in the metrics slice: apachedruid.task.action.success.count") + validatedMetrics["apachedruid.task.action.success.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of task actions that were executed successfully during the emission period. Currently only being emitted for [batched `segmentAllocate` actions](https,//druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions).", ms.At(i).Description()) + assert.Equal(t, "{actions}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "task_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_action_type") + assert.True(t, ok) + assert.EqualValues(t, "task_action_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "task_group_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "task_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "task_id-val", attrVal.Str()) + case "apachedruid.task.failed.count": + assert.False(t, validatedMetrics["apachedruid.task.failed.count"], "Found a duplicate in the metrics slice: apachedruid.task.failed.count") + validatedMetrics["apachedruid.task.failed.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of failed tasks per emission period. 
This metric is only available if the `TaskCountStatsMonitor` module is included.", ms.At(i).Description()) + assert.Equal(t, "{tasks}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "task_data_source-val", attrVal.Str()) + case "apachedruid.task.pending.count": + assert.False(t, validatedMetrics["apachedruid.task.pending.count"], "Found a duplicate in the metrics slice: apachedruid.task.pending.count") + validatedMetrics["apachedruid.task.pending.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of current pending tasks. This metric is only available if the `TaskCountStatsMonitor` module is included.", ms.At(i).Description()) + assert.Equal(t, "{tasks}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "task_data_source-val", attrVal.Str()) + case "apachedruid.task.pending.time": + assert.False(t, validatedMetrics["apachedruid.task.pending.time"], "Found a duplicate in the metrics slice: apachedruid.task.pending.time") + validatedMetrics["apachedruid.task.pending.time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Milliseconds taken for a task to wait for running.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "task_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "task_group_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "task_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "task_id-val", attrVal.Str()) + case "apachedruid.task.run.time": + assert.False(t, validatedMetrics["apachedruid.task.run.time"], "Found a duplicate in the metrics slice: apachedruid.task.run.time") + validatedMetrics["apachedruid.task.run.time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Milliseconds taken to run a task.", 
ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "task_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "task_group_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_status") + assert.True(t, ok) + assert.EqualValues(t, "task_status-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "task_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "task_id-val", attrVal.Str()) + case "apachedruid.task.running.count": + assert.False(t, validatedMetrics["apachedruid.task.running.count"], "Found a duplicate in the metrics slice: apachedruid.task.running.count") + validatedMetrics["apachedruid.task.running.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of current running tasks. This metric is only available if the `TaskCountStatsMonitor` module is included.", ms.At(i).Description()) + assert.Equal(t, "{tasks}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "task_data_source-val", attrVal.Str()) + case "apachedruid.task.segment_availability.wait.time": + assert.False(t, validatedMetrics["apachedruid.task.segment_availability.wait.time"], "Found a duplicate in the metrics slice: apachedruid.task.segment_availability.wait.time") + validatedMetrics["apachedruid.task.segment_availability.wait.time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The amount of milliseconds a batch indexing task waited for newly created segments to become available for querying.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("task_type") + assert.True(t, ok) + assert.EqualValues(t, "task_type-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "task_data_source-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("group_id") + assert.True(t, ok) + assert.EqualValues(t, "task_group_id-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("segment_availability_confirmed") + assert.True(t, ok) + assert.EqualValues(t, 
"task_segment_availability_confirmed-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("tags") + assert.True(t, ok) + assert.EqualValues(t, "task_tags-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("task_id") + assert.True(t, ok) + assert.EqualValues(t, "task_id-val", attrVal.Str()) + case "apachedruid.task.success.count": + assert.False(t, validatedMetrics["apachedruid.task.success.count"], "Found a duplicate in the metrics slice: apachedruid.task.success.count") + validatedMetrics["apachedruid.task.success.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of successful tasks per emission period. This metric is only available if the `TaskCountStatsMonitor` module is included.", ms.At(i).Description()) + assert.Equal(t, "{tasks}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "task_data_source-val", attrVal.Str()) + case "apachedruid.task.waiting.count": + assert.False(t, validatedMetrics["apachedruid.task.waiting.count"], "Found a duplicate in the metrics slice: apachedruid.task.waiting.count") + validatedMetrics["apachedruid.task.waiting.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of current waiting tasks. This metric is only available if the `TaskCountStatsMonitor` module is included.", ms.At(i).Description()) + assert.Equal(t, "{tasks}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("data_source") + assert.True(t, ok) + assert.EqualValues(t, "task_data_source-val", attrVal.Str()) + case "apachedruid.task_slot.blacklisted.count": + assert.False(t, validatedMetrics["apachedruid.task_slot.blacklisted.count"], "Found a duplicate in the metrics slice: apachedruid.task_slot.blacklisted.count") + validatedMetrics["apachedruid.task_slot.blacklisted.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of total task slots in blacklisted Middle Managers and Indexers per emission period. 
This metric is only available if the `TaskSlotCountStatsMonitor` module is included.", ms.At(i).Description()) + assert.Equal(t, "{slots}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("category") + assert.True(t, ok) + assert.EqualValues(t, "taskSlot_category-val", attrVal.Str()) + case "apachedruid.task_slot.idle.count": + assert.False(t, validatedMetrics["apachedruid.task_slot.idle.count"], "Found a duplicate in the metrics slice: apachedruid.task_slot.idle.count") + validatedMetrics["apachedruid.task_slot.idle.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of idle task slots per emission period. This metric is only available if the `TaskSlotCountStatsMonitor` module is included.", ms.At(i).Description()) + assert.Equal(t, "{slots}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("category") + assert.True(t, ok) + assert.EqualValues(t, "taskSlot_category-val", attrVal.Str()) + case "apachedruid.task_slot.lazy.count": + assert.False(t, validatedMetrics["apachedruid.task_slot.lazy.count"], "Found a duplicate in the metrics slice: apachedruid.task_slot.lazy.count") + validatedMetrics["apachedruid.task_slot.lazy.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of total task slots in lazy marked Middle Managers and Indexers per emission period. This metric is only available if the `TaskSlotCountStatsMonitor` module is included.", ms.At(i).Description()) + assert.Equal(t, "{slots}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("category") + assert.True(t, ok) + assert.EqualValues(t, "taskSlot_category-val", attrVal.Str()) + case "apachedruid.task_slot.total.count": + assert.False(t, validatedMetrics["apachedruid.task_slot.total.count"], "Found a duplicate in the metrics slice: apachedruid.task_slot.total.count") + validatedMetrics["apachedruid.task_slot.total.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of total task slots per emission period. 
This metric is only available if the `TaskSlotCountStatsMonitor` module is included.", ms.At(i).Description()) + assert.Equal(t, "{slots}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("category") + assert.True(t, ok) + assert.EqualValues(t, "taskSlot_category-val", attrVal.Str()) + case "apachedruid.task_slot.used.count": + assert.False(t, validatedMetrics["apachedruid.task_slot.used.count"], "Found a duplicate in the metrics slice: apachedruid.task_slot.used.count") + validatedMetrics["apachedruid.task_slot.used.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of busy task slots per emission period. This metric is only available if the `TaskSlotCountStatsMonitor` module is included.", ms.At(i).Description()) + assert.Equal(t, "{slots}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("category") + assert.True(t, ok) + assert.EqualValues(t, "taskSlot_category-val", attrVal.Str()) + case "apachedruid.tier.historical.count": + assert.False(t, validatedMetrics["apachedruid.tier.historical.count"], "Found a duplicate in the metrics slice: apachedruid.tier.historical.count") + validatedMetrics["apachedruid.tier.historical.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of available historical nodes in each tier.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("tier") + assert.True(t, ok) + assert.EqualValues(t, "tier-val", attrVal.Str()) + case "apachedruid.tier.replication.factor": + assert.False(t, validatedMetrics["apachedruid.tier.replication.factor"], "Found a duplicate in the metrics slice: apachedruid.tier.replication.factor") + validatedMetrics["apachedruid.tier.replication.factor"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Configured maximum replication factor in each tier.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("tier") + assert.True(t, ok) + assert.EqualValues(t, "tier-val", attrVal.Str()) + case 
"apachedruid.tier.required.capacity": + assert.False(t, validatedMetrics["apachedruid.tier.required.capacity"], "Found a duplicate in the metrics slice: apachedruid.tier.required.capacity") + validatedMetrics["apachedruid.tier.required.capacity"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total capacity in bytes required in each tier.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("tier") + assert.True(t, ok) + assert.EqualValues(t, "tier-val", attrVal.Str()) + case "apachedruid.tier.total.capacity": + assert.False(t, validatedMetrics["apachedruid.tier.total.capacity"], "Found a duplicate in the metrics slice: apachedruid.tier.total.capacity") + validatedMetrics["apachedruid.tier.total.capacity"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total capacity in bytes available in each tier.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("tier") + assert.True(t, ok) + assert.EqualValues(t, "tier-val", attrVal.Str()) + case "apachedruid.worker.task.failed.count": + assert.False(t, validatedMetrics["apachedruid.worker.task.failed.count"], "Found a duplicate in the metrics slice: apachedruid.worker.task.failed.count") + validatedMetrics["apachedruid.worker.task.failed.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of failed tasks run on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included, and is only supported for Middle Manager nodes.", ms.At(i).Description()) + assert.Equal(t, "{tasks}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("category") + assert.True(t, ok) + assert.EqualValues(t, "worker_category-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("worker_version") + assert.True(t, ok) + assert.EqualValues(t, "worker_version-val", attrVal.Str()) + case "apachedruid.worker.task.success.count": + assert.False(t, validatedMetrics["apachedruid.worker.task.success.count"], "Found a duplicate in the metrics slice: apachedruid.worker.task.success.count") + validatedMetrics["apachedruid.worker.task.success.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of successful tasks run on the reporting worker per emission period. 
This metric is only available if the `WorkerTaskCountStatsMonitor` module is included, and is only supported for Middle Manager nodes.", ms.At(i).Description()) + assert.Equal(t, "{tasks}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("category") + assert.True(t, ok) + assert.EqualValues(t, "worker_category-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("worker_version") + assert.True(t, ok) + assert.EqualValues(t, "worker_version-val", attrVal.Str()) + case "apachedruid.worker.task_slot.idle.count": + assert.False(t, validatedMetrics["apachedruid.worker.task_slot.idle.count"], "Found a duplicate in the metrics slice: apachedruid.worker.task_slot.idle.count") + validatedMetrics["apachedruid.worker.task_slot.idle.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of idle task slots on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included, and is only supported for Middle Manager nodes.", ms.At(i).Description()) + assert.Equal(t, "{slots}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("category") + assert.True(t, ok) + assert.EqualValues(t, "worker_category-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("worker_version") + assert.True(t, ok) + assert.EqualValues(t, "worker_version-val", attrVal.Str()) + case "apachedruid.worker.task_slot.total.count": + assert.False(t, validatedMetrics["apachedruid.worker.task_slot.total.count"], "Found a duplicate in the metrics slice: apachedruid.worker.task_slot.total.count") + validatedMetrics["apachedruid.worker.task_slot.total.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of total task slots on the reporting worker per emission period. 
This metric is only available if the `WorkerTaskCountStatsMonitor` module is included.", ms.At(i).Description()) + assert.Equal(t, "{slots}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("category") + assert.True(t, ok) + assert.EqualValues(t, "worker_category-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("worker_version") + assert.True(t, ok) + assert.EqualValues(t, "worker_version-val", attrVal.Str()) + case "apachedruid.worker.task_slot.used.count": + assert.False(t, validatedMetrics["apachedruid.worker.task_slot.used.count"], "Found a duplicate in the metrics slice: apachedruid.worker.task_slot.used.count") + validatedMetrics["apachedruid.worker.task_slot.used.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of busy task slots on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included.", ms.At(i).Description()) + assert.Equal(t, "{slots}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("category") + assert.True(t, ok) + assert.EqualValues(t, "worker_category-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("worker_version") + assert.True(t, ok) + assert.EqualValues(t, "worker_version-val", attrVal.Str()) + case "apachedruid.zk.connected": + assert.False(t, validatedMetrics["apachedruid.zk.connected"], "Found a duplicate in the metrics slice: apachedruid.zk.connected") + validatedMetrics["apachedruid.zk.connected"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Indicator of connection status. `1` for connected, `0` for disconnected. Emitted once per monitor period.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "apachedruid.zk.reconnect.time": + assert.False(t, validatedMetrics["apachedruid.zk.reconnect.time"], "Found a duplicate in the metrics slice: apachedruid.zk.reconnect.time") + validatedMetrics["apachedruid.zk.reconnect.time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Amount of time, in milliseconds, that a server was disconnected from ZooKeeper before reconnecting. Emitted on reconnection. 
Not emitted if connection to ZooKeeper is permanently lost, because in this case, there is no reconnection.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + } + } + }) + } +} diff --git a/receiver/apachedruidreceiver/internal/metadata/generated_resource.go b/receiver/apachedruidreceiver/internal/metadata/generated_resource.go new file mode 100644 index 0000000000000..98fa1566e4515 --- /dev/null +++ b/receiver/apachedruidreceiver/internal/metadata/generated_resource.go @@ -0,0 +1,50 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// ResourceBuilder is a helper struct to build resources predefined in metadata.yaml. +// The ResourceBuilder is not thread-safe and must not to be used in multiple goroutines. +type ResourceBuilder struct { + config ResourceAttributesConfig + res pcommon.Resource +} + +// NewResourceBuilder creates a new ResourceBuilder. This method should be called on the start of the application. +func NewResourceBuilder(rac ResourceAttributesConfig) *ResourceBuilder { + return &ResourceBuilder{ + config: rac, + res: pcommon.NewResource(), + } +} + +// SetApachedruidClusterName sets provided value as "apachedruid.cluster.name" attribute. +func (rb *ResourceBuilder) SetApachedruidClusterName(val string) { + if rb.config.ApachedruidClusterName.Enabled { + rb.res.Attributes().PutStr("apachedruid.cluster.name", val) + } +} + +// SetApachedruidNodeHost sets provided value as "apachedruid.node.host" attribute. +func (rb *ResourceBuilder) SetApachedruidNodeHost(val string) { + if rb.config.ApachedruidNodeHost.Enabled { + rb.res.Attributes().PutStr("apachedruid.node.host", val) + } +} + +// SetApachedruidNodeService sets provided value as "apachedruid.node.service" attribute. +func (rb *ResourceBuilder) SetApachedruidNodeService(val string) { + if rb.config.ApachedruidNodeService.Enabled { + rb.res.Attributes().PutStr("apachedruid.node.service", val) + } +} + +// Emit returns the built resource and resets the internal builder state. +func (rb *ResourceBuilder) Emit() pcommon.Resource { + r := rb.res + rb.res = pcommon.NewResource() + return r +} diff --git a/receiver/apachedruidreceiver/internal/metadata/generated_resource_test.go b/receiver/apachedruidreceiver/internal/metadata/generated_resource_test.go new file mode 100644 index 0000000000000..12ebc19c48eda --- /dev/null +++ b/receiver/apachedruidreceiver/internal/metadata/generated_resource_test.go @@ -0,0 +1,52 @@ +// Code generated by mdatagen. DO NOT EDIT. 
+ +package metadata + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestResourceBuilder(t *testing.T) { + for _, test := range []string{"default", "all_set", "none_set"} { + t.Run(test, func(t *testing.T) { + cfg := loadResourceAttributesConfig(t, test) + rb := NewResourceBuilder(cfg) + rb.SetApachedruidClusterName("apachedruid.cluster.name-val") + rb.SetApachedruidNodeHost("apachedruid.node.host-val") + rb.SetApachedruidNodeService("apachedruid.node.service-val") + + res := rb.Emit() + assert.Equal(t, 0, rb.Emit().Attributes().Len()) // Second call should return empty Resource + + switch test { + case "default": + assert.Equal(t, 3, res.Attributes().Len()) + case "all_set": + assert.Equal(t, 3, res.Attributes().Len()) + case "none_set": + assert.Equal(t, 0, res.Attributes().Len()) + return + default: + assert.Failf(t, "unexpected test case: %s", test) + } + + val, ok := res.Attributes().Get("apachedruid.cluster.name") + assert.True(t, ok) + if ok { + assert.EqualValues(t, "apachedruid.cluster.name-val", val.Str()) + } + val, ok = res.Attributes().Get("apachedruid.node.host") + assert.True(t, ok) + if ok { + assert.EqualValues(t, "apachedruid.node.host-val", val.Str()) + } + val, ok = res.Attributes().Get("apachedruid.node.service") + assert.True(t, ok) + if ok { + assert.EqualValues(t, "apachedruid.node.service-val", val.Str()) + } + }) + } +} diff --git a/receiver/apachedruidreceiver/internal/metadata/generated_status.go b/receiver/apachedruidreceiver/internal/metadata/generated_status.go new file mode 100644 index 0000000000000..d1ec0263cd299 --- /dev/null +++ b/receiver/apachedruidreceiver/internal/metadata/generated_status.go @@ -0,0 +1,26 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" +) + +var ( + Type = component.MustNewType("apachedruid") +) + +const ( + MetricsStability = component.StabilityLevelDevelopment + LogsStability = component.StabilityLevelDevelopment +) + +func Meter(settings component.TelemetrySettings) metric.Meter { + return settings.MeterProvider.Meter("github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachedruidreceiver") +} + +func Tracer(settings component.TelemetrySettings) trace.Tracer { + return settings.TracerProvider.Tracer("github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachedruidreceiver") +} diff --git a/receiver/apachedruidreceiver/internal/metadata/testdata/config.yaml b/receiver/apachedruidreceiver/internal/metadata/testdata/config.yaml new file mode 100644 index 0000000000000..71d65b6a141c0 --- /dev/null +++ b/receiver/apachedruidreceiver/internal/metadata/testdata/config.yaml @@ -0,0 +1,999 @@ +default: +all_set: + metrics: + apachedruid.compact.segment_analyzer.fetch_and_process_millis: + enabled: true + apachedruid.compact.task.count: + enabled: true + apachedruid.compact_task.available_slot.count: + enabled: true + apachedruid.compact_task.max_slot.count: + enabled: true + apachedruid.coordinator.global.time: + enabled: true + apachedruid.coordinator.time: + enabled: true + apachedruid.ingest.bytes.received: + enabled: true + apachedruid.ingest.count: + enabled: true + apachedruid.ingest.events.buffered: + enabled: true + apachedruid.ingest.events.duplicate: + enabled: true + apachedruid.ingest.events.message_gap: + enabled: true + apachedruid.ingest.events.processed: + enabled: true + apachedruid.ingest.events.processed_with_error: + 
enabled: true + apachedruid.ingest.events.thrown_away: + enabled: true + apachedruid.ingest.events.unparseable: + enabled: true + apachedruid.ingest.handoff.count: + enabled: true + apachedruid.ingest.handoff.failed: + enabled: true + apachedruid.ingest.handoff.time: + enabled: true + apachedruid.ingest.input.bytes: + enabled: true + apachedruid.ingest.kafka.avg_lag: + enabled: true + apachedruid.ingest.kafka.lag: + enabled: true + apachedruid.ingest.kafka.max_lag: + enabled: true + apachedruid.ingest.kafka.partition_lag: + enabled: true + apachedruid.ingest.kinesis.avg_lag.time: + enabled: true + apachedruid.ingest.kinesis.lag.time: + enabled: true + apachedruid.ingest.kinesis.max_lag.time: + enabled: true + apachedruid.ingest.kinesis.partition_lag.time: + enabled: true + apachedruid.ingest.merge.cpu: + enabled: true + apachedruid.ingest.merge.time: + enabled: true + apachedruid.ingest.notices.queue_size: + enabled: true + apachedruid.ingest.notices.time: + enabled: true + apachedruid.ingest.pause.time: + enabled: true + apachedruid.ingest.persists.back_pressure: + enabled: true + apachedruid.ingest.persists.count: + enabled: true + apachedruid.ingest.persists.cpu: + enabled: true + apachedruid.ingest.persists.failed: + enabled: true + apachedruid.ingest.persists.time: + enabled: true + apachedruid.ingest.rows.output: + enabled: true + apachedruid.ingest.segments.count: + enabled: true + apachedruid.ingest.shuffle.bytes: + enabled: true + apachedruid.ingest.shuffle.requests: + enabled: true + apachedruid.ingest.sink.count: + enabled: true + apachedruid.ingest.tombstones.count: + enabled: true + apachedruid.interval.compacted.count: + enabled: true + apachedruid.interval.skip_compact.count: + enabled: true + apachedruid.interval.wait_compact.count: + enabled: true + apachedruid.jetty.num_open_connections: + enabled: true + apachedruid.jetty.thread_pool.busy: + enabled: true + apachedruid.jetty.thread_pool.idle: + enabled: true + apachedruid.jetty.thread_pool.is_low_on_threads: + enabled: true + apachedruid.jetty.thread_pool.max: + enabled: true + apachedruid.jetty.thread_pool.min: + enabled: true + apachedruid.jetty.thread_pool.queue_size: + enabled: true + apachedruid.jetty.thread_pool.total: + enabled: true + apachedruid.jvm.bufferpool.capacity: + enabled: true + apachedruid.jvm.bufferpool.count: + enabled: true + apachedruid.jvm.bufferpool.used: + enabled: true + apachedruid.jvm.gc.count: + enabled: true + apachedruid.jvm.gc.cpu: + enabled: true + apachedruid.jvm.mem.committed: + enabled: true + apachedruid.jvm.mem.init: + enabled: true + apachedruid.jvm.mem.max: + enabled: true + apachedruid.jvm.mem.used: + enabled: true + apachedruid.jvm.pool.committed: + enabled: true + apachedruid.jvm.pool.init: + enabled: true + apachedruid.jvm.pool.max: + enabled: true + apachedruid.jvm.pool.used: + enabled: true + apachedruid.kill.pending_segments.count: + enabled: true + apachedruid.kill.task.count: + enabled: true + apachedruid.kill_task.available_slot.count: + enabled: true + apachedruid.kill_task.max_slot.count: + enabled: true + apachedruid.merge_buffer.pending_requests: + enabled: true + apachedruid.metadata.kill.audit.count: + enabled: true + apachedruid.metadata.kill.compaction.count: + enabled: true + apachedruid.metadata.kill.datasource.count: + enabled: true + apachedruid.metadata.kill.rule.count: + enabled: true + apachedruid.metadata.kill.supervisor.count: + enabled: true + apachedruid.metadatacache.init.time: + enabled: true + apachedruid.metadatacache.refresh.count: + enabled: true 
+ apachedruid.metadatacache.refresh.time: + enabled: true + apachedruid.query.byte_limit.exceeded.count: + enabled: true + apachedruid.query.bytes: + enabled: true + apachedruid.query.cache.delta.average_bytes: + enabled: true + apachedruid.query.cache.delta.errors: + enabled: true + apachedruid.query.cache.delta.evictions: + enabled: true + apachedruid.query.cache.delta.hit_rate: + enabled: true + apachedruid.query.cache.delta.hits: + enabled: true + apachedruid.query.cache.delta.misses: + enabled: true + apachedruid.query.cache.delta.num_entries: + enabled: true + apachedruid.query.cache.delta.put.error: + enabled: true + apachedruid.query.cache.delta.put.ok: + enabled: true + apachedruid.query.cache.delta.put.oversized: + enabled: true + apachedruid.query.cache.delta.size_bytes: + enabled: true + apachedruid.query.cache.delta.timeouts: + enabled: true + apachedruid.query.cache.memcached.delta: + enabled: true + apachedruid.query.cache.memcached.total: + enabled: true + apachedruid.query.cache.total.average_bytes: + enabled: true + apachedruid.query.cache.total.errors: + enabled: true + apachedruid.query.cache.total.evictions: + enabled: true + apachedruid.query.cache.total.hit_rate: + enabled: true + apachedruid.query.cache.total.hits: + enabled: true + apachedruid.query.cache.total.misses: + enabled: true + apachedruid.query.cache.total.num_entries: + enabled: true + apachedruid.query.cache.total.put.error: + enabled: true + apachedruid.query.cache.total.put.ok: + enabled: true + apachedruid.query.cache.total.put.oversized: + enabled: true + apachedruid.query.cache.total.size_bytes: + enabled: true + apachedruid.query.cache.total.timeouts: + enabled: true + apachedruid.query.count: + enabled: true + apachedruid.query.cpu.time: + enabled: true + apachedruid.query.failed.count: + enabled: true + apachedruid.query.interrupted.count: + enabled: true + apachedruid.query.node.backpressure: + enabled: true + apachedruid.query.node.bytes: + enabled: true + apachedruid.query.node.time: + enabled: true + apachedruid.query.node.ttfb: + enabled: true + apachedruid.query.priority: + enabled: true + apachedruid.query.row_limit.exceeded.count: + enabled: true + apachedruid.query.segment.time: + enabled: true + apachedruid.query.segment_and_cache.time: + enabled: true + apachedruid.query.segments.count: + enabled: true + apachedruid.query.success.count: + enabled: true + apachedruid.query.time: + enabled: true + apachedruid.query.timeout.count: + enabled: true + apachedruid.query.wait.time: + enabled: true + apachedruid.segment.added.bytes: + enabled: true + apachedruid.segment.assign_skipped.count: + enabled: true + apachedruid.segment.assigned.count: + enabled: true + apachedruid.segment.compacted.bytes: + enabled: true + apachedruid.segment.compacted.count: + enabled: true + apachedruid.segment.count: + enabled: true + apachedruid.segment.deleted.count: + enabled: true + apachedruid.segment.drop_queue.count: + enabled: true + apachedruid.segment.drop_skipped.count: + enabled: true + apachedruid.segment.dropped.count: + enabled: true + apachedruid.segment.load_queue.assigned: + enabled: true + apachedruid.segment.load_queue.cancelled: + enabled: true + apachedruid.segment.load_queue.count: + enabled: true + apachedruid.segment.load_queue.failed: + enabled: true + apachedruid.segment.load_queue.size: + enabled: true + apachedruid.segment.load_queue.success: + enabled: true + apachedruid.segment.max: + enabled: true + apachedruid.segment.move_skipped.count: + enabled: true + 
apachedruid.segment.moved.bytes: + enabled: true + apachedruid.segment.moved.count: + enabled: true + apachedruid.segment.nuked.bytes: + enabled: true + apachedruid.segment.over_shadowed.count: + enabled: true + apachedruid.segment.pending_delete: + enabled: true + apachedruid.segment.row_count.avg: + enabled: true + apachedruid.segment.row_count.range.count: + enabled: true + apachedruid.segment.scan.active: + enabled: true + apachedruid.segment.scan.pending: + enabled: true + apachedruid.segment.size: + enabled: true + apachedruid.segment.skip_compact.bytes: + enabled: true + apachedruid.segment.skip_compact.count: + enabled: true + apachedruid.segment.unavailable.count: + enabled: true + apachedruid.segment.under_replicated.count: + enabled: true + apachedruid.segment.unneeded.count: + enabled: true + apachedruid.segment.used: + enabled: true + apachedruid.segment.used_percent: + enabled: true + apachedruid.segment.wait_compact.bytes: + enabled: true + apachedruid.segment.wait_compact.count: + enabled: true + apachedruid.serverview.init.time: + enabled: true + apachedruid.serverview.sync.healthy: + enabled: true + apachedruid.serverview.sync.unstable_time: + enabled: true + apachedruid.sql_query.bytes: + enabled: true + apachedruid.sql_query.planning_time_ms: + enabled: true + apachedruid.sql_query.time: + enabled: true + apachedruid.subquery.byte_limit.count: + enabled: true + apachedruid.subquery.fallback.count: + enabled: true + apachedruid.subquery.fallback.insufficient_type.count: + enabled: true + apachedruid.subquery.fallback.unknown_reason.count: + enabled: true + apachedruid.subquery.row_limit.count: + enabled: true + apachedruid.sys.cpu: + enabled: true + apachedruid.sys.disk.queue: + enabled: true + apachedruid.sys.disk.read.count: + enabled: true + apachedruid.sys.disk.read.size: + enabled: true + apachedruid.sys.disk.transfer_time: + enabled: true + apachedruid.sys.disk.write.count: + enabled: true + apachedruid.sys.disk.write.size: + enabled: true + apachedruid.sys.fs.files.count: + enabled: true + apachedruid.sys.fs.files.free: + enabled: true + apachedruid.sys.fs.max: + enabled: true + apachedruid.sys.fs.used: + enabled: true + apachedruid.sys.la.1: + enabled: true + apachedruid.sys.la.15: + enabled: true + apachedruid.sys.la.5: + enabled: true + apachedruid.sys.mem.free: + enabled: true + apachedruid.sys.mem.max: + enabled: true + apachedruid.sys.mem.used: + enabled: true + apachedruid.sys.net.read.dropped: + enabled: true + apachedruid.sys.net.read.errors: + enabled: true + apachedruid.sys.net.read.packets: + enabled: true + apachedruid.sys.net.read.size: + enabled: true + apachedruid.sys.net.write.collisions: + enabled: true + apachedruid.sys.net.write.errors: + enabled: true + apachedruid.sys.net.write.packets: + enabled: true + apachedruid.sys.net.write.size: + enabled: true + apachedruid.sys.storage.used: + enabled: true + apachedruid.sys.swap.free: + enabled: true + apachedruid.sys.swap.max: + enabled: true + apachedruid.sys.swap.page_in: + enabled: true + apachedruid.sys.swap.page_out: + enabled: true + apachedruid.sys.tcpv4.active_opens: + enabled: true + apachedruid.sys.tcpv4.attempt_fails: + enabled: true + apachedruid.sys.tcpv4.estab_resets: + enabled: true + apachedruid.sys.tcpv4.in.errs: + enabled: true + apachedruid.sys.tcpv4.in.segs: + enabled: true + apachedruid.sys.tcpv4.out.rsts: + enabled: true + apachedruid.sys.tcpv4.out.segs: + enabled: true + apachedruid.sys.tcpv4.passive_opens: + enabled: true + apachedruid.sys.tcpv4.retrans.segs: + enabled: true 
+ apachedruid.sys.uptime: + enabled: true + apachedruid.task.action.batch.attempts: + enabled: true + apachedruid.task.action.batch.queue_time: + enabled: true + apachedruid.task.action.batch.run_time: + enabled: true + apachedruid.task.action.batch.size: + enabled: true + apachedruid.task.action.failed.count: + enabled: true + apachedruid.task.action.log.time: + enabled: true + apachedruid.task.action.run.time: + enabled: true + apachedruid.task.action.success.count: + enabled: true + apachedruid.task.failed.count: + enabled: true + apachedruid.task.pending.count: + enabled: true + apachedruid.task.pending.time: + enabled: true + apachedruid.task.run.time: + enabled: true + apachedruid.task.running.count: + enabled: true + apachedruid.task.segment_availability.wait.time: + enabled: true + apachedruid.task.success.count: + enabled: true + apachedruid.task.waiting.count: + enabled: true + apachedruid.task_slot.blacklisted.count: + enabled: true + apachedruid.task_slot.idle.count: + enabled: true + apachedruid.task_slot.lazy.count: + enabled: true + apachedruid.task_slot.total.count: + enabled: true + apachedruid.task_slot.used.count: + enabled: true + apachedruid.tier.historical.count: + enabled: true + apachedruid.tier.replication.factor: + enabled: true + apachedruid.tier.required.capacity: + enabled: true + apachedruid.tier.total.capacity: + enabled: true + apachedruid.worker.task.failed.count: + enabled: true + apachedruid.worker.task.success.count: + enabled: true + apachedruid.worker.task_slot.idle.count: + enabled: true + apachedruid.worker.task_slot.total.count: + enabled: true + apachedruid.worker.task_slot.used.count: + enabled: true + apachedruid.zk.connected: + enabled: true + apachedruid.zk.reconnect.time: + enabled: true + resource_attributes: + apachedruid.cluster.name: + enabled: true + apachedruid.node.host: + enabled: true + apachedruid.node.service: + enabled: true +none_set: + metrics: + apachedruid.compact.segment_analyzer.fetch_and_process_millis: + enabled: false + apachedruid.compact.task.count: + enabled: false + apachedruid.compact_task.available_slot.count: + enabled: false + apachedruid.compact_task.max_slot.count: + enabled: false + apachedruid.coordinator.global.time: + enabled: false + apachedruid.coordinator.time: + enabled: false + apachedruid.ingest.bytes.received: + enabled: false + apachedruid.ingest.count: + enabled: false + apachedruid.ingest.events.buffered: + enabled: false + apachedruid.ingest.events.duplicate: + enabled: false + apachedruid.ingest.events.message_gap: + enabled: false + apachedruid.ingest.events.processed: + enabled: false + apachedruid.ingest.events.processed_with_error: + enabled: false + apachedruid.ingest.events.thrown_away: + enabled: false + apachedruid.ingest.events.unparseable: + enabled: false + apachedruid.ingest.handoff.count: + enabled: false + apachedruid.ingest.handoff.failed: + enabled: false + apachedruid.ingest.handoff.time: + enabled: false + apachedruid.ingest.input.bytes: + enabled: false + apachedruid.ingest.kafka.avg_lag: + enabled: false + apachedruid.ingest.kafka.lag: + enabled: false + apachedruid.ingest.kafka.max_lag: + enabled: false + apachedruid.ingest.kafka.partition_lag: + enabled: false + apachedruid.ingest.kinesis.avg_lag.time: + enabled: false + apachedruid.ingest.kinesis.lag.time: + enabled: false + apachedruid.ingest.kinesis.max_lag.time: + enabled: false + apachedruid.ingest.kinesis.partition_lag.time: + enabled: false + apachedruid.ingest.merge.cpu: + enabled: false + 
apachedruid.ingest.merge.time: + enabled: false + apachedruid.ingest.notices.queue_size: + enabled: false + apachedruid.ingest.notices.time: + enabled: false + apachedruid.ingest.pause.time: + enabled: false + apachedruid.ingest.persists.back_pressure: + enabled: false + apachedruid.ingest.persists.count: + enabled: false + apachedruid.ingest.persists.cpu: + enabled: false + apachedruid.ingest.persists.failed: + enabled: false + apachedruid.ingest.persists.time: + enabled: false + apachedruid.ingest.rows.output: + enabled: false + apachedruid.ingest.segments.count: + enabled: false + apachedruid.ingest.shuffle.bytes: + enabled: false + apachedruid.ingest.shuffle.requests: + enabled: false + apachedruid.ingest.sink.count: + enabled: false + apachedruid.ingest.tombstones.count: + enabled: false + apachedruid.interval.compacted.count: + enabled: false + apachedruid.interval.skip_compact.count: + enabled: false + apachedruid.interval.wait_compact.count: + enabled: false + apachedruid.jetty.num_open_connections: + enabled: false + apachedruid.jetty.thread_pool.busy: + enabled: false + apachedruid.jetty.thread_pool.idle: + enabled: false + apachedruid.jetty.thread_pool.is_low_on_threads: + enabled: false + apachedruid.jetty.thread_pool.max: + enabled: false + apachedruid.jetty.thread_pool.min: + enabled: false + apachedruid.jetty.thread_pool.queue_size: + enabled: false + apachedruid.jetty.thread_pool.total: + enabled: false + apachedruid.jvm.bufferpool.capacity: + enabled: false + apachedruid.jvm.bufferpool.count: + enabled: false + apachedruid.jvm.bufferpool.used: + enabled: false + apachedruid.jvm.gc.count: + enabled: false + apachedruid.jvm.gc.cpu: + enabled: false + apachedruid.jvm.mem.committed: + enabled: false + apachedruid.jvm.mem.init: + enabled: false + apachedruid.jvm.mem.max: + enabled: false + apachedruid.jvm.mem.used: + enabled: false + apachedruid.jvm.pool.committed: + enabled: false + apachedruid.jvm.pool.init: + enabled: false + apachedruid.jvm.pool.max: + enabled: false + apachedruid.jvm.pool.used: + enabled: false + apachedruid.kill.pending_segments.count: + enabled: false + apachedruid.kill.task.count: + enabled: false + apachedruid.kill_task.available_slot.count: + enabled: false + apachedruid.kill_task.max_slot.count: + enabled: false + apachedruid.merge_buffer.pending_requests: + enabled: false + apachedruid.metadata.kill.audit.count: + enabled: false + apachedruid.metadata.kill.compaction.count: + enabled: false + apachedruid.metadata.kill.datasource.count: + enabled: false + apachedruid.metadata.kill.rule.count: + enabled: false + apachedruid.metadata.kill.supervisor.count: + enabled: false + apachedruid.metadatacache.init.time: + enabled: false + apachedruid.metadatacache.refresh.count: + enabled: false + apachedruid.metadatacache.refresh.time: + enabled: false + apachedruid.query.byte_limit.exceeded.count: + enabled: false + apachedruid.query.bytes: + enabled: false + apachedruid.query.cache.delta.average_bytes: + enabled: false + apachedruid.query.cache.delta.errors: + enabled: false + apachedruid.query.cache.delta.evictions: + enabled: false + apachedruid.query.cache.delta.hit_rate: + enabled: false + apachedruid.query.cache.delta.hits: + enabled: false + apachedruid.query.cache.delta.misses: + enabled: false + apachedruid.query.cache.delta.num_entries: + enabled: false + apachedruid.query.cache.delta.put.error: + enabled: false + apachedruid.query.cache.delta.put.ok: + enabled: false + apachedruid.query.cache.delta.put.oversized: + enabled: false + 
apachedruid.query.cache.delta.size_bytes: + enabled: false + apachedruid.query.cache.delta.timeouts: + enabled: false + apachedruid.query.cache.memcached.delta: + enabled: false + apachedruid.query.cache.memcached.total: + enabled: false + apachedruid.query.cache.total.average_bytes: + enabled: false + apachedruid.query.cache.total.errors: + enabled: false + apachedruid.query.cache.total.evictions: + enabled: false + apachedruid.query.cache.total.hit_rate: + enabled: false + apachedruid.query.cache.total.hits: + enabled: false + apachedruid.query.cache.total.misses: + enabled: false + apachedruid.query.cache.total.num_entries: + enabled: false + apachedruid.query.cache.total.put.error: + enabled: false + apachedruid.query.cache.total.put.ok: + enabled: false + apachedruid.query.cache.total.put.oversized: + enabled: false + apachedruid.query.cache.total.size_bytes: + enabled: false + apachedruid.query.cache.total.timeouts: + enabled: false + apachedruid.query.count: + enabled: false + apachedruid.query.cpu.time: + enabled: false + apachedruid.query.failed.count: + enabled: false + apachedruid.query.interrupted.count: + enabled: false + apachedruid.query.node.backpressure: + enabled: false + apachedruid.query.node.bytes: + enabled: false + apachedruid.query.node.time: + enabled: false + apachedruid.query.node.ttfb: + enabled: false + apachedruid.query.priority: + enabled: false + apachedruid.query.row_limit.exceeded.count: + enabled: false + apachedruid.query.segment.time: + enabled: false + apachedruid.query.segment_and_cache.time: + enabled: false + apachedruid.query.segments.count: + enabled: false + apachedruid.query.success.count: + enabled: false + apachedruid.query.time: + enabled: false + apachedruid.query.timeout.count: + enabled: false + apachedruid.query.wait.time: + enabled: false + apachedruid.segment.added.bytes: + enabled: false + apachedruid.segment.assign_skipped.count: + enabled: false + apachedruid.segment.assigned.count: + enabled: false + apachedruid.segment.compacted.bytes: + enabled: false + apachedruid.segment.compacted.count: + enabled: false + apachedruid.segment.count: + enabled: false + apachedruid.segment.deleted.count: + enabled: false + apachedruid.segment.drop_queue.count: + enabled: false + apachedruid.segment.drop_skipped.count: + enabled: false + apachedruid.segment.dropped.count: + enabled: false + apachedruid.segment.load_queue.assigned: + enabled: false + apachedruid.segment.load_queue.cancelled: + enabled: false + apachedruid.segment.load_queue.count: + enabled: false + apachedruid.segment.load_queue.failed: + enabled: false + apachedruid.segment.load_queue.size: + enabled: false + apachedruid.segment.load_queue.success: + enabled: false + apachedruid.segment.max: + enabled: false + apachedruid.segment.move_skipped.count: + enabled: false + apachedruid.segment.moved.bytes: + enabled: false + apachedruid.segment.moved.count: + enabled: false + apachedruid.segment.nuked.bytes: + enabled: false + apachedruid.segment.over_shadowed.count: + enabled: false + apachedruid.segment.pending_delete: + enabled: false + apachedruid.segment.row_count.avg: + enabled: false + apachedruid.segment.row_count.range.count: + enabled: false + apachedruid.segment.scan.active: + enabled: false + apachedruid.segment.scan.pending: + enabled: false + apachedruid.segment.size: + enabled: false + apachedruid.segment.skip_compact.bytes: + enabled: false + apachedruid.segment.skip_compact.count: + enabled: false + apachedruid.segment.unavailable.count: + enabled: false + 
apachedruid.segment.under_replicated.count: + enabled: false + apachedruid.segment.unneeded.count: + enabled: false + apachedruid.segment.used: + enabled: false + apachedruid.segment.used_percent: + enabled: false + apachedruid.segment.wait_compact.bytes: + enabled: false + apachedruid.segment.wait_compact.count: + enabled: false + apachedruid.serverview.init.time: + enabled: false + apachedruid.serverview.sync.healthy: + enabled: false + apachedruid.serverview.sync.unstable_time: + enabled: false + apachedruid.sql_query.bytes: + enabled: false + apachedruid.sql_query.planning_time_ms: + enabled: false + apachedruid.sql_query.time: + enabled: false + apachedruid.subquery.byte_limit.count: + enabled: false + apachedruid.subquery.fallback.count: + enabled: false + apachedruid.subquery.fallback.insufficient_type.count: + enabled: false + apachedruid.subquery.fallback.unknown_reason.count: + enabled: false + apachedruid.subquery.row_limit.count: + enabled: false + apachedruid.sys.cpu: + enabled: false + apachedruid.sys.disk.queue: + enabled: false + apachedruid.sys.disk.read.count: + enabled: false + apachedruid.sys.disk.read.size: + enabled: false + apachedruid.sys.disk.transfer_time: + enabled: false + apachedruid.sys.disk.write.count: + enabled: false + apachedruid.sys.disk.write.size: + enabled: false + apachedruid.sys.fs.files.count: + enabled: false + apachedruid.sys.fs.files.free: + enabled: false + apachedruid.sys.fs.max: + enabled: false + apachedruid.sys.fs.used: + enabled: false + apachedruid.sys.la.1: + enabled: false + apachedruid.sys.la.15: + enabled: false + apachedruid.sys.la.5: + enabled: false + apachedruid.sys.mem.free: + enabled: false + apachedruid.sys.mem.max: + enabled: false + apachedruid.sys.mem.used: + enabled: false + apachedruid.sys.net.read.dropped: + enabled: false + apachedruid.sys.net.read.errors: + enabled: false + apachedruid.sys.net.read.packets: + enabled: false + apachedruid.sys.net.read.size: + enabled: false + apachedruid.sys.net.write.collisions: + enabled: false + apachedruid.sys.net.write.errors: + enabled: false + apachedruid.sys.net.write.packets: + enabled: false + apachedruid.sys.net.write.size: + enabled: false + apachedruid.sys.storage.used: + enabled: false + apachedruid.sys.swap.free: + enabled: false + apachedruid.sys.swap.max: + enabled: false + apachedruid.sys.swap.page_in: + enabled: false + apachedruid.sys.swap.page_out: + enabled: false + apachedruid.sys.tcpv4.active_opens: + enabled: false + apachedruid.sys.tcpv4.attempt_fails: + enabled: false + apachedruid.sys.tcpv4.estab_resets: + enabled: false + apachedruid.sys.tcpv4.in.errs: + enabled: false + apachedruid.sys.tcpv4.in.segs: + enabled: false + apachedruid.sys.tcpv4.out.rsts: + enabled: false + apachedruid.sys.tcpv4.out.segs: + enabled: false + apachedruid.sys.tcpv4.passive_opens: + enabled: false + apachedruid.sys.tcpv4.retrans.segs: + enabled: false + apachedruid.sys.uptime: + enabled: false + apachedruid.task.action.batch.attempts: + enabled: false + apachedruid.task.action.batch.queue_time: + enabled: false + apachedruid.task.action.batch.run_time: + enabled: false + apachedruid.task.action.batch.size: + enabled: false + apachedruid.task.action.failed.count: + enabled: false + apachedruid.task.action.log.time: + enabled: false + apachedruid.task.action.run.time: + enabled: false + apachedruid.task.action.success.count: + enabled: false + apachedruid.task.failed.count: + enabled: false + apachedruid.task.pending.count: + enabled: false + apachedruid.task.pending.time: + enabled: 
false + apachedruid.task.run.time: + enabled: false + apachedruid.task.running.count: + enabled: false + apachedruid.task.segment_availability.wait.time: + enabled: false + apachedruid.task.success.count: + enabled: false + apachedruid.task.waiting.count: + enabled: false + apachedruid.task_slot.blacklisted.count: + enabled: false + apachedruid.task_slot.idle.count: + enabled: false + apachedruid.task_slot.lazy.count: + enabled: false + apachedruid.task_slot.total.count: + enabled: false + apachedruid.task_slot.used.count: + enabled: false + apachedruid.tier.historical.count: + enabled: false + apachedruid.tier.replication.factor: + enabled: false + apachedruid.tier.required.capacity: + enabled: false + apachedruid.tier.total.capacity: + enabled: false + apachedruid.worker.task.failed.count: + enabled: false + apachedruid.worker.task.success.count: + enabled: false + apachedruid.worker.task_slot.idle.count: + enabled: false + apachedruid.worker.task_slot.total.count: + enabled: false + apachedruid.worker.task_slot.used.count: + enabled: false + apachedruid.zk.connected: + enabled: false + apachedruid.zk.reconnect.time: + enabled: false + resource_attributes: + apachedruid.cluster.name: + enabled: false + apachedruid.node.host: + enabled: false + apachedruid.node.service: + enabled: false diff --git a/receiver/apachedruidreceiver/metadata.yaml b/receiver/apachedruidreceiver/metadata.yaml new file mode 100644 index 0000000000000..4542eeb8cdb8b --- /dev/null +++ b/receiver/apachedruidreceiver/metadata.yaml @@ -0,0 +1,2541 @@ +type: apachedruid + +status: + class: receiver + stability: + development: [metrics, logs] + distributions: [contrib, observiq, sumo] + codeowners: + active: [yuanlihan] + +resource_attributes: + apachedruid.cluster.name: + description: The name of the apachedruid cluster. + type: string + enabled: true + apachedruid.node.host: + description: The name of the apachedruid node. + type: string + enabled: true + apachedruid.node.service: + description: The service name of the apachedruid node. + type: string + enabled: true +attributes: + query_data_source: + name_override: data_source + description: The data source name of the query. + type: string + query_num_metrics: + name_override: num_metrics + description: The number of metrics of the query. + type: string + query_dimension: + name_override: dimension + description: The dimension of the query. + type: string + query_has_filters: + name_override: has_filters + description: Whether query has filters. + type: string + query_threshold: + name_override: threshold + description: The threshold of query. + type: int + query_num_complex_metrics: + name_override: num_complex_metrics + description: The number of complex metrics. + type: int + query_type: + name_override: type + description: The type of query. + type: string + query_remote_address: + name_override: remote_address + description: The remote address of the query. + type: string + query_id: + name_override: id + description: The id of query. + type: string + query_context: + name_override: context + description: The context of the query. + type: string + query_num_dimensions: + name_override: num_dimensions + description: The number of dimensions of query. + type: string + query_interval: + name_override: interval + description: The interval of the query. + type: string + query_duration: + name_override: duration + description: The duration of query. + type: string + query_status: + name_override: status + description: The status of the query. 
+ type: string + query_server: + name_override: server + description: The server of the query. + type: string + query_lane: + name_override: lane + description: The name of query lane. + type: string + sqlQuery_data_source: + name_override: data_source + description: The data source name of the query. + type: string + sqlQuery_native_query_ids: + name_override: native_query_ids + description: The native query ids of sql query. + type: string + sqlQuery_engine: + name_override: engine + description: The engine name of the sql query. + type: string + sqlQuery_remote_address: + name_override: remote_address + description: The remote address of sql query. + type: string + sqlQuery_id: + name_override: id + description: The id of sql query. + type: string + sqlQuery_success: + name_override: success + description: Whether sql query is successful. + type: string + serverview_tier: + name_override: tier + description: The name of the tier. + type: string + serverview_server: + name_override: server + description: The address of server. + type: string + query_segment: + name_override: segment + description: The segment of the query. + type: string + query_vectorized: + name_override: vectorized + description: Whether query is vectorized. + type: string + ingest_task_type: + name_override: task_type + description: The type of ingestion task. + type: string + ingest_data_source: + name_override: data_source + description: The data source of ingestion task. + type: string + ingest_group_id: + name_override: group_id + description: The ingestion group id. + type: string + ingest_tags: + name_override: tags + description: The names of tags. + type: string + ingest_task_id: + name_override: task_id + description: The id of the task. + type: string + ingest_task_ingestion_mode: + name_override: task_ingestion_mode + description: The mode of ingestion task. + type: string + ingest_stream: + name_override: stream + description: The name of stream to ingest. + type: string + ingest_partition: + name_override: partition + description: The partition of the topic. + type: string + compact_task_type: + name_override: task_type + description: The type of task. + type: string + compact_data_source: + name_override: data_source + description: The data source of compaction task. + type: string + compact_group_id: + name_override: group_id + description: The group id of compaction task. + type: string + compact_tags: + name_override: tags + description: The tags of the compaction task. + type: string + compact_task_id: + name_override: task_id + description: The task id of compaction task. + type: string + task_type: + name_override: task_type + description: The type of task. + type: string + task_data_source: + name_override: data_source + description: The data source of the task. + type: string + task_group_id: + name_override: group_id + description: The group id of the task. + type: string + task_status: + name_override: task_status + description: The status of the task. + type: string + task_tags: + name_override: tags + description: The tags of task. + type: string + task_id: + name_override: task_id + description: The id of task. + type: string + task_action_type: + name_override: task_action_type + description: The action type of task. + type: string + task_interval: + name_override: interval + description: The interval of task. + type: string + task_segment_availability_confirmed: + name_override: segment_availability_confirmed + description: Whether segment availability is confirmed. 
+ type: string + segment_task_type: + name_override: task_type + description: The task type of the segment. + type: string + segment_data_source: + name_override: data_source + description: The data source of the segment. + type: string + segment_group_id: + name_override: group_id + description: The group id of segment. + type: string + segment_tags: + name_override: tags + description: The tags of the segment. + type: string + segment_task_id: + name_override: task_id + description: The task id of segment. + type: string + segment_interval: + name_override: interval + description: The interval of segment. + type: string + taskSlot_category: + name_override: category + description: The category of task slot. + type: string + worker_category: + name_override: category + description: The category of worker. + type: string + worker_version: + name_override: worker_version + description: The version of worker. + type: string + ingest_supervisor_task_id: + name_override: supervisor_task_id + description: The task id of supervisor. + type: string + segment_tier: + name_override: tier + description: The name of segment tier. + type: string + segment_description: + name_override: description + description: The description of segment. + type: string + segment_server: + name_override: server + description: The server of the segment. + type: string + segment_priority: + name_override: priority + description: The priority of segment. + type: string + tier: + name_override: tier + description: The name of tier. + type: string + kill_data_source: + name_override: data_source + description: The data source name of the kill task. + type: string + interval_data_source: + name_override: data_source + description: The data source of the interval. + type: string + coordinator_duty: + name_override: duty + description: The name of coordinator duty task. + type: string + coordinator_duty_group: + name_override: duty_group + description: The name of the duty group. + type: string + segment_range: + name_override: range + description: The range of segment. + type: string + jvm_pool_name: + name_override: pool_name + description: The name of the pool. + type: string + jvm_pool_kind: + name_override: pool_kind + description: The pool kind of jvm. + type: string + jvm_bufferpool_name: + name_override: bufferpool_name + description: The name of buffer pool. + type: string + jvm_mem_kind: + name_override: mem_kind + description: The memory kind of jvm. + type: string + jvm_gc_gen: + name_override: gc_gen + description: The name of GC generation. + type: string + jvm_gc_name: + name_override: gc_name + description: The gc name of jvm. + type: string + ingest_service_name: + name_override: service_name + description: The name of ingestion service. + type: string + ingest_buffer_capacity: + name_override: buffer_capacity + description: The capacity of ingestion buffer. + type: string + sys_disk_name: + name_override: disk_name + description: The name of disk. + type: string + sys_net_hwaddr: + name_override: net_hwaddr + description: The net hardware address. + type: string + sys_net_name: + name_override: net_name + description: The name of network. + type: string + sys_net_address: + name_override: net_address + description: The net address. + type: string + sys_fs_dir_name: + name_override: fs_dir_name + description: The dir name. + type: string + sys_fs_dev_name: + name_override: fs_dev_name + description: The dev name.
+ type: string + sys_cpu_time: + name_override: cpu_time + description: The group name of cpu time usage. + type: string + sys_cpu_name: + name_override: cpu_name + description: The group name of cpu usage. + type: string +metrics: + apachedruid.query.time: + description: Milliseconds taken to complete a query. + unit: ms + gauge: + value_type: int + attributes: + [ + query_data_source, + query_num_metrics, + query_dimension, + query_has_filters, + query_threshold, + query_num_complex_metrics, + query_type, + query_remote_address, + query_id, + query_context, + query_num_dimensions, + query_interval, + query_duration, + ] + enabled: true + apachedruid.query.bytes: + description: The total number of bytes returned to the requesting client in the query response from the broker. Other services report the total bytes for their portion of the query. + unit: By + gauge: + value_type: int + attributes: + [ + query_data_source, + query_num_metrics, + query_dimension, + query_has_filters, + query_threshold, + query_num_complex_metrics, + query_type, + query_remote_address, + query_id, + query_context, + query_num_dimensions, + query_interval, + query_duration, + ] + enabled: true + apachedruid.query.node.time: + description: Milliseconds taken to query individual historical/realtime processes. + unit: ms + gauge: + value_type: int + attributes: [query_status, query_server, query_id] + enabled: true + apachedruid.query.node.bytes: + description: Number of bytes returned from querying individual historical/realtime processes. + unit: By + gauge: + value_type: int + attributes: [query_status, query_server, query_id] + enabled: true + apachedruid.query.node.ttfb: + description: Time to first byte. Milliseconds elapsed until Broker starts receiving the response from individual historical/realtime processes. + unit: ms + gauge: + value_type: int + attributes: [query_status, query_server, query_id] + enabled: true + apachedruid.query.node.backpressure: + description: Milliseconds that the channel to this process has spent suspended due to backpressure. + unit: ms + gauge: + value_type: int + attributes: [query_status, query_server, query_id] + enabled: true + apachedruid.query.count: + description: Number of total queries. + unit: "{queries}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [] + enabled: true + apachedruid.query.success.count: + description: Number of queries successfully processed. + unit: "{queries}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [] + enabled: true + apachedruid.query.failed.count: + description: Number of failed queries. + unit: "{queries}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [] + enabled: true + apachedruid.query.interrupted.count: + description: Number of queries interrupted due to cancellation. + unit: "{queries}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [] + enabled: true + apachedruid.query.timeout.count: + description: Number of timed out queries. + unit: "{queries}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [] + enabled: true + apachedruid.merge_buffer.pending_requests: + description: Number of requests waiting to acquire a batch of buffers from the merge buffer pool. 
+ unit: "{requests}" + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.query.segments.count: + description: This metric is not enabled by default. See the `QueryMetrics` Interface for reference regarding enabling this metric. Number of segments that will be touched by the query. In the broker, it makes a plan to distribute the query to realtime tasks and historicals based on a snapshot of segment distribution state. If there are some segments moved after this snapshot is created, certain historicals and realtime tasks can report those segments as missing to the broker. The broker will resend the query to the new servers that serve those segments after move. In this case, those segments can be counted more than once in this metric. + unit: "{segments}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [] + enabled: true + apachedruid.query.priority: + description: Assigned lane and priority, only if Laning strategy is enabled. Refer to [Laning strategies](https,//druid.apache.org/docs/latest/configuration#laning-strategies). + unit: 1 + gauge: + value_type: int + attributes: [query_type, query_data_source, query_lane] + enabled: true + apachedruid.sql_query.time: + description: Milliseconds taken to complete a SQL query. + unit: ms + gauge: + value_type: int + attributes: + [ + sqlQuery_data_source, + sqlQuery_native_query_ids, + sqlQuery_engine, + sqlQuery_remote_address, + sqlQuery_id, + sqlQuery_success, + ] + enabled: true + apachedruid.sql_query.planning_time_ms: + description: Milliseconds taken to plan a SQL to native query. + unit: ms + gauge: + value_type: int + attributes: + [ + sqlQuery_data_source, + sqlQuery_native_query_ids, + sqlQuery_engine, + sqlQuery_remote_address, + sqlQuery_id, + sqlQuery_success, + ] + enabled: true + apachedruid.sql_query.bytes: + description: Number of bytes returned in the SQL query response. + unit: By + gauge: + value_type: int + attributes: + [ + sqlQuery_data_source, + sqlQuery_native_query_ids, + sqlQuery_engine, + sqlQuery_remote_address, + sqlQuery_id, + sqlQuery_success, + ] + enabled: true + apachedruid.serverview.init.time: + description: Time taken to initialize the broker server view. Useful to detect if brokers are taking too long to start. + unit: ms + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.metadatacache.init.time: + description: Time taken to initialize the broker segment metadata cache. Useful to detect if brokers are taking too long to start. + unit: ms + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.metadatacache.refresh.count: + description: Number of segments to refresh in broker segment metadata cache. + unit: 1 + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.metadatacache.refresh.time: + description: Time taken to refresh segments in broker segment metadata cache. + unit: ms + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.serverview.sync.healthy: + description: Sync status of the Broker with a segment-loading server such as a Historical or Peon. Emitted only when [HTTP-based server view](https,//druid.apache.org/docs/latest/configuration#segment-management) is enabled. This metric can be used in conjunction with `serverview/sync/unstableTime` to debug slow startup of Brokers. 
+ unit: 1 + gauge: + value_type: int + attributes: [serverview_tier, serverview_server] + enabled: true + apachedruid.serverview.sync.unstable_time: + description: Time in milliseconds for which the Broker has been failing to sync with a segment-loading server. Emitted only when [HTTP-based server view](https://druid.apache.org/docs/latest/configuration#segment-management) is enabled. + unit: ms + gauge: + value_type: int + attributes: [serverview_tier, serverview_server] + enabled: true + apachedruid.subquery.row_limit.count: + description: Number of subqueries whose results are materialized as rows (Java objects on heap). + unit: "{subqueries}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [] + enabled: true + apachedruid.subquery.byte_limit.count: + description: Number of subqueries whose results are materialized as frames (Druid's internal byte representation of rows). + unit: "{subqueries}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [] + enabled: true + apachedruid.subquery.fallback.count: + description: Number of subqueries which cannot be materialized as frames. + unit: "{subqueries}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [] + enabled: true + apachedruid.subquery.fallback.insufficient_type.count: + description: Number of subqueries which cannot be materialized as frames due to insufficient type information in the row signature. + unit: "{subqueries}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [] + enabled: true + apachedruid.subquery.fallback.unknown_reason.count: + description: Number of subqueries which cannot be materialized as frames due to other reasons. + unit: "{subqueries}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [] + enabled: true + apachedruid.query.row_limit.exceeded.count: + description: Number of queries whose inlined subquery results exceeded the given row limit. + unit: "{queries}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [] + enabled: true + apachedruid.query.byte_limit.exceeded.count: + description: Number of queries whose inlined subquery results exceeded the given byte limit. + unit: "{queries}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [] + enabled: true + apachedruid.query.segment.time: + description: Milliseconds taken to query individual segment. Includes time to page in the segment from disk. + unit: ms + gauge: + value_type: int + attributes: [query_status, query_segment, query_id, query_vectorized] + enabled: true + apachedruid.query.wait.time: + description: Milliseconds spent waiting for a segment to be scanned. + unit: ms + gauge: + value_type: int + attributes: [query_segment, query_id] + enabled: true + apachedruid.segment.scan.pending: + description: Number of segments in queue waiting to be scanned. + unit: "{segments}" + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.segment.scan.active: + description: Number of segments currently scanned. This metric also indicates how many threads from `druid.processing.numThreads` are currently being used. + unit: "{segments}" + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.query.segment_and_cache.time: + description: Milliseconds taken to query individual segment or hit the cache (if it is enabled on the Historical process).
+ unit: ms + gauge: + value_type: int + attributes: [query_segment, query_id] + enabled: true + apachedruid.query.cpu.time: + description: Microseconds of CPU time taken to complete a query. + unit: ms + gauge: + value_type: int + attributes: + [ + query_data_source, + query_num_metrics, + query_dimension, + query_has_filters, + query_threshold, + query_num_complex_metrics, + query_type, + query_remote_address, + query_id, + query_context, + query_num_dimensions, + query_interval, + query_duration, + ] + enabled: true + apachedruid.jetty.num_open_connections: + description: Number of open jetty connections. + unit: "{connections}" + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.jetty.thread_pool.total: + description: Number of total workable threads allocated. + unit: "{threads}" + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.jetty.thread_pool.idle: + description: Number of idle threads. + unit: "{threads}" + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.jetty.thread_pool.busy: + description: Number of busy threads that have work to do from the worker queue. + unit: "{threads}" + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.jetty.thread_pool.is_low_on_threads: + description: A rough indicator of whether the number of total workable threads allocated is enough to handle the work in the work queue. + unit: "{threads}" + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.jetty.thread_pool.min: + description: Number of minimum threads allocatable. + unit: "{threads}" + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.jetty.thread_pool.max: + description: Number of maximum threads allocatable. + unit: "{threads}" + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.jetty.thread_pool.queue_size: + description: Size of the worker queue. + unit: 1 + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.query.cache.delta.num_entries: + description: Number of cache entries. + unit: "{entries}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [] + enabled: true + apachedruid.query.cache.total.num_entries: + description: Number of cache entries. + unit: "{entries}" + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.query.cache.delta.size_bytes: + description: Size in bytes of cache entries. + unit: By + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [] + enabled: true + apachedruid.query.cache.total.size_bytes: + description: Size in bytes of cache entries. + unit: By + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.query.cache.delta.hits: + description: Number of cache hits. + unit: "{hits}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [] + enabled: true + apachedruid.query.cache.total.hits: + description: Number of cache hits. + unit: "{hits}" + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.query.cache.delta.misses: + description: Number of cache misses. + unit: "{misses}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [] + enabled: true + apachedruid.query.cache.total.misses: + description: Number of cache misses. + unit: "{misses}" + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.query.cache.delta.evictions: + description: Number of cache evictions.
+ unit: "{evictions}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [] + enabled: true + apachedruid.query.cache.total.evictions: + description: Number of cache evictions. + unit: "{evictions}" + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.query.cache.delta.hit_rate: + description: Cache hit rate. + unit: 1.0 + sum: + monotonic: true + aggregation_temporality: delta + value_type: double + attributes: [] + enabled: true + apachedruid.query.cache.total.hit_rate: + description: Cache hit rate. + unit: 1.0 + gauge: + value_type: double + attributes: [] + enabled: true + apachedruid.query.cache.delta.average_bytes: + description: Average cache entry byte size. + unit: By + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [] + enabled: true + apachedruid.query.cache.total.average_bytes: + description: Average cache entry byte size. + unit: By + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.query.cache.delta.timeouts: + description: Number of cache timeouts. + unit: "{timeouts}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [] + enabled: true + apachedruid.query.cache.total.timeouts: + description: Number of cache timeouts. + unit: "{timeouts}" + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.query.cache.delta.errors: + description: Number of cache errors. + unit: "{errors}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [] + enabled: true + apachedruid.query.cache.total.errors: + description: Number of cache errors. + unit: "{errors}" + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.query.cache.delta.put.ok: + description: Number of new cache entries successfully cached. + unit: 1 + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [] + enabled: true + apachedruid.query.cache.total.put.ok: + description: Number of new cache entries successfully cached. + unit: 1 + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.query.cache.delta.put.error: + description: Number of new cache entries that could not be cached due to errors. + unit: "{errors}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [] + enabled: true + apachedruid.query.cache.total.put.error: + description: Number of new cache entries that could not be cached due to errors. + unit: "{errors}" + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.query.cache.delta.put.oversized: + description: Number of potential new cache entries that were skipped due to being too large (based on `druid.{broker,historical,realtime}.cache.maxEntrySize` properties). + unit: 1 + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [] + enabled: true + apachedruid.query.cache.total.put.oversized: + description: Number of potential new cache entries that were skipped due to being too large (based on `druid.{broker,historical,realtime}.cache.maxEntrySize` properties). + unit: 1 + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.query.cache.memcached.total: + description: Cache metrics unique to memcached (only if `druid.cache.type=memcached`) as their actual values. 
+ unit: 1 + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.query.cache.memcached.delta: + description: Cache metrics unique to memcached (only if `druid.cache.type=memcached`) as their delta from the prior event emission. + unit: 1 + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [] + enabled: true + apachedruid.ingest.count: + description: Count of `1` every time an ingestion job runs (includes compaction jobs). Aggregate using dimensions. + unit: 1 + gauge: + value_type: int + attributes: + [ + ingest_task_type, + ingest_data_source, + ingest_group_id, + ingest_tags, + ingest_task_id, + ingest_task_ingestion_mode, + ] + enabled: true + apachedruid.ingest.segments.count: + description: Count of final segments created by job (includes tombstones). + unit: 1 + gauge: + value_type: int + attributes: + [ + ingest_task_type, + ingest_data_source, + ingest_group_id, + ingest_tags, + ingest_task_id, + ingest_task_ingestion_mode, + ] + enabled: true + apachedruid.ingest.tombstones.count: + description: Count of tombstones created by job. + unit: 1 + gauge: + value_type: int + attributes: + [ + ingest_task_type, + ingest_data_source, + ingest_group_id, + ingest_tags, + ingest_task_id, + ingest_task_ingestion_mode, + ] + enabled: true + apachedruid.ingest.kafka.lag: + description: Total lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers across all partitions. Minimum emission period for this metric is a minute. + unit: 1 + gauge: + value_type: int + attributes: [ingest_tags, ingest_stream, ingest_data_source] + enabled: true + apachedruid.ingest.kafka.max_lag: + description: Max lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers across all partitions. Minimum emission period for this metric is a minute. + unit: 1 + gauge: + value_type: int + attributes: [ingest_tags, ingest_stream, ingest_data_source] + enabled: true + apachedruid.ingest.kafka.avg_lag: + description: Average lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers across all partitions. Minimum emission period for this metric is a minute. + unit: 1 + gauge: + value_type: int + attributes: [ingest_tags, ingest_stream, ingest_data_source] + enabled: true + apachedruid.ingest.kafka.partition_lag: + description: Partition-wise lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers. Minimum emission period for this metric is a minute. + unit: 1 + gauge: + value_type: int + attributes: + [ingest_tags, ingest_partition, ingest_stream, ingest_data_source] + enabled: true + apachedruid.ingest.kinesis.lag.time: + description: Total lag time in milliseconds between the current message sequence number consumed by the Kinesis indexing tasks and latest sequence number in Kinesis across all shards. Minimum emission period for this metric is a minute. + unit: ms + gauge: + value_type: int + attributes: [ingest_tags, ingest_stream, ingest_data_source] + enabled: true + apachedruid.ingest.kinesis.max_lag.time: + description: Max lag time in milliseconds between the current message sequence number consumed by the Kinesis indexing tasks and latest sequence number in Kinesis across all shards. Minimum emission period for this metric is a minute. 
+ unit: ms + gauge: + value_type: int + attributes: [ingest_tags, ingest_stream, ingest_data_source] + enabled: true + apachedruid.ingest.kinesis.avg_lag.time: + description: Average lag time in milliseconds between the current message sequence number consumed by the Kinesis indexing tasks and latest sequence number in Kinesis across all shards. Minimum emission period for this metric is a minute. + unit: ms + gauge: + value_type: int + attributes: [ingest_tags, ingest_stream, ingest_data_source] + enabled: true + apachedruid.ingest.kinesis.partition_lag.time: + description: Partition-wise lag time in milliseconds between the current message sequence number consumed by the Kinesis indexing tasks and latest sequence number in Kinesis. Minimum emission period for this metric is a minute. + unit: ms + gauge: + value_type: int + attributes: + [ingest_tags, ingest_partition, ingest_stream, ingest_data_source] + enabled: true + apachedruid.compact.segment_analyzer.fetch_and_process_millis: + description: Time taken to fetch and process segments to infer the schema for the compaction task to run. + unit: 1 + gauge: + value_type: int + attributes: + [ + compact_task_type, + compact_data_source, + compact_group_id, + compact_tags, + compact_task_id, + ] + enabled: true + apachedruid.ingest.events.processed: + description: Number of events processed per emission period. + unit: "{events}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: + [ + ingest_task_type, + ingest_data_source, + ingest_group_id, + ingest_tags, + ingest_task_id, + ] + enabled: true + apachedruid.ingest.events.processed_with_error: + description: Number of events processed with some partial errors per emission period. Events processed with partial errors are counted towards both this metric and `ingest/events/processed`. + unit: "{events}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: + [ + ingest_task_type, + ingest_data_source, + ingest_group_id, + ingest_tags, + ingest_task_id, + ] + enabled: true + apachedruid.ingest.events.unparseable: + description: Number of events rejected because the events are unparseable. + unit: "{events}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: + [ + ingest_task_type, + ingest_data_source, + ingest_group_id, + ingest_tags, + ingest_task_id, + ] + enabled: true + apachedruid.ingest.events.thrown_away: + description: Number of events rejected because they are null, or filtered by `transformSpec`, or outside one of `lateMessageRejectionPeriod`, `earlyMessageRejectionPeriod`, or `windowPeriod`. + unit: "{events}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: + [ + ingest_task_type, + ingest_data_source, + ingest_group_id, + ingest_tags, + ingest_task_id, + ] + enabled: true + apachedruid.ingest.events.duplicate: + description: Number of events rejected because the events are duplicated. + unit: "{events}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: + [ + ingest_task_type, + ingest_data_source, + ingest_group_id, + ingest_tags, + ingest_task_id, + ] + enabled: true + apachedruid.ingest.input.bytes: + description: Number of bytes read from input sources, after decompression but prior to parsing. This covers all data read, including data that does not end up being fully processed and ingested. 
For example, this includes data that ends up being rejected for being unparseable or filtered out. + unit: By + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: + [ + ingest_task_type, + ingest_data_source, + ingest_group_id, + ingest_tags, + ingest_task_id, + ] + enabled: true + apachedruid.ingest.rows.output: + description: Number of Druid rows persisted. + unit: "{rows}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: + [ingest_task_type, ingest_task_id, ingest_data_source, ingest_group_id] + enabled: true + apachedruid.ingest.persists.count: + description: Number of times persist occurred. + unit: 1 + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: + [ + ingest_task_type, + ingest_data_source, + ingest_group_id, + ingest_tags, + ingest_task_id, + ] + enabled: true + apachedruid.ingest.persists.time: + description: Milliseconds spent doing intermediate persist. + unit: ms + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: + [ + ingest_task_type, + ingest_data_source, + ingest_group_id, + ingest_tags, + ingest_task_id, + ] + enabled: true + apachedruid.ingest.persists.cpu: + description: CPU time in nanoseconds spent on doing intermediate persist. + unit: ns + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: + [ + ingest_task_type, + ingest_data_source, + ingest_group_id, + ingest_tags, + ingest_task_id, + ] + enabled: true + apachedruid.ingest.persists.back_pressure: + description: Milliseconds spent creating persist tasks and blocking waiting for them to finish. + unit: ms + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: + [ + ingest_task_type, + ingest_data_source, + ingest_group_id, + ingest_tags, + ingest_task_id, + ] + enabled: true + apachedruid.ingest.persists.failed: + description: Number of persists that failed. + unit: "{persists}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: + [ + ingest_task_type, + ingest_data_source, + ingest_group_id, + ingest_tags, + ingest_task_id, + ] + enabled: true + apachedruid.ingest.handoff.failed: + description: Number of handoffs that failed. + unit: "{handoffs}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: + [ + ingest_task_type, + ingest_data_source, + ingest_group_id, + ingest_tags, + ingest_task_id, + ] + enabled: true + apachedruid.ingest.merge.time: + description: Milliseconds spent merging intermediate segments. + unit: ms + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: + [ + ingest_task_type, + ingest_data_source, + ingest_group_id, + ingest_tags, + ingest_task_id, + ] + enabled: true + apachedruid.ingest.merge.cpu: + description: CPU time in Nanoseconds spent on merging intermediate segments. + unit: ns + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: + [ + ingest_task_type, + ingest_data_source, + ingest_group_id, + ingest_tags, + ingest_task_id, + ] + enabled: true + apachedruid.ingest.handoff.count: + description: Number of handoffs that happened. 
+ unit: "{handoffs}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: + [ + ingest_task_type, + ingest_data_source, + ingest_group_id, + ingest_tags, + ingest_task_id, + ] + enabled: true + apachedruid.ingest.sink.count: + description: Number of sinks not handed off. + unit: "{sinks}" + gauge: + value_type: int + attributes: + [ + ingest_task_type, + ingest_data_source, + ingest_group_id, + ingest_tags, + ingest_task_id, + ] + enabled: true + apachedruid.ingest.events.message_gap: + description: Time gap in milliseconds between the latest ingested event timestamp and the current system timestamp of metrics emission. If the value is increasing but lag is low, Druid may not be receiving new data. This metric is reset as new tasks spawn up. + unit: ms + gauge: + value_type: int + attributes: + [ + ingest_task_type, + ingest_data_source, + ingest_group_id, + ingest_tags, + ingest_task_id, + ] + enabled: true + apachedruid.ingest.notices.queue_size: + description: Number of pending notices to be processed by the coordinator. + unit: "{notices}" + gauge: + value_type: int + attributes: [ingest_tags, ingest_data_source] + enabled: true + apachedruid.ingest.notices.time: + description: Milliseconds taken to process a notice by the supervisor. + unit: ms + gauge: + value_type: int + attributes: [ingest_tags, ingest_data_source] + enabled: true + apachedruid.ingest.pause.time: + description: Milliseconds spent by a task in a paused state without ingesting. + unit: ms + gauge: + value_type: int + attributes: [ingest_tags, ingest_task_id, ingest_data_source] + enabled: true + apachedruid.ingest.handoff.time: + description: Total number of milliseconds taken to handoff a set of segments. + unit: ms + gauge: + value_type: int + attributes: + [ + ingest_task_type, + ingest_data_source, + ingest_group_id, + ingest_tags, + ingest_task_id, + ] + enabled: true + apachedruid.task.run.time: + description: Milliseconds taken to run a task. + unit: ms + gauge: + value_type: int + attributes: + [ + task_type, + task_data_source, + task_group_id, + task_status, + task_tags, + task_id, + ] + enabled: true + apachedruid.task.pending.time: + description: Milliseconds taken for a task to wait for running. + unit: ms + gauge: + value_type: int + attributes: [task_type, task_data_source, task_group_id, task_tags, task_id] + enabled: true + apachedruid.task.action.log.time: + description: Milliseconds taken to log a task action to the audit log. + unit: ms + gauge: + value_type: int + attributes: + [ + task_type, + task_data_source, + task_action_type, + task_group_id, + task_tags, + task_id, + ] + enabled: true + apachedruid.task.action.run.time: + description: Milliseconds taken to execute a task action. + unit: ms + gauge: + value_type: int + attributes: + [ + task_type, + task_data_source, + task_action_type, + task_group_id, + task_tags, + task_id, + ] + enabled: true + apachedruid.task.action.success.count: + description: Number of task actions that were executed successfully during the emission period. Currently only being emitted for [batched `segmentAllocate` actions](https,//druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions). + unit: "{actions}" + gauge: + value_type: int + attributes: + [ + task_type, + task_data_source, + task_action_type, + task_group_id, + task_tags, + task_id, + ] + enabled: true + apachedruid.task.action.failed.count: + description: Number of task actions that failed during the emission period. 
Currently only being emitted for [batched `segmentAllocate` actions](https://druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions). + unit: "{actions}" + gauge: + value_type: int + attributes: + [ + task_type, + task_data_source, + task_action_type, + task_group_id, + task_tags, + task_id, + ] + enabled: true + apachedruid.task.action.batch.queue_time: + description: Milliseconds spent by a batch of task actions in queue. Currently only being emitted for [batched `segmentAllocate` actions](https://druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions). + unit: ms + gauge: + value_type: int + attributes: [task_interval, task_data_source, task_action_type] + enabled: true + apachedruid.task.action.batch.run_time: + description: Milliseconds taken to execute a batch of task actions. Currently only being emitted for [batched `segmentAllocate` actions](https://druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions). + unit: ms + gauge: + value_type: int + attributes: [task_interval, task_data_source, task_action_type] + enabled: true + apachedruid.task.action.batch.size: + description: Number of task actions in a batch that was executed during the emission period. Currently only being emitted for [batched `segmentAllocate` actions](https://druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions). + unit: "{actions}" + gauge: + value_type: int + attributes: [task_interval, task_data_source, task_action_type] + enabled: true + apachedruid.task.action.batch.attempts: + description: Number of execution attempts for a single batch of task actions. Currently only being emitted for [batched `segmentAllocate` actions](https://druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions). + unit: "{attempts}" + gauge: + value_type: int + attributes: [task_interval, task_data_source, task_action_type] + enabled: true + apachedruid.task.segment_availability.wait.time: + description: The amount of milliseconds a batch indexing task waited for newly created segments to become available for querying. + unit: ms + gauge: + value_type: int + attributes: + [ + task_type, + task_data_source, + task_group_id, + task_segment_availability_confirmed, + task_tags, + task_id, + ] + enabled: true + apachedruid.segment.added.bytes: + description: Size in bytes of new segments created. + unit: By + gauge: + value_type: int + attributes: + [ + segment_task_type, + segment_data_source, + segment_group_id, + segment_tags, + segment_task_id, + segment_interval, + ] + enabled: true + apachedruid.segment.moved.bytes: + description: Size in bytes of segments moved/archived via the Move Task. + unit: By + gauge: + value_type: int + attributes: + [ + segment_task_type, + segment_data_source, + segment_group_id, + segment_tags, + segment_task_id, + segment_interval, + ] + enabled: true + apachedruid.segment.nuked.bytes: + description: Size in bytes of segments deleted via the Kill Task. + unit: By + gauge: + value_type: int + attributes: + [ + segment_task_type, + segment_data_source, + segment_group_id, + segment_tags, + segment_task_id, + segment_interval, + ] + enabled: true + apachedruid.task.success.count: + description: Number of successful tasks per emission period. This metric is only available if the `TaskCountStatsMonitor` module is included.
+ unit: "{tasks}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [task_data_source] + enabled: true + apachedruid.task.failed.count: + description: Number of failed tasks per emission period. This metric is only available if the `TaskCountStatsMonitor` module is included. + unit: "{tasks}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [task_data_source] + enabled: true + apachedruid.task.running.count: + description: Number of current running tasks. This metric is only available if the `TaskCountStatsMonitor` module is included. + unit: "{tasks}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [task_data_source] + enabled: true + apachedruid.task.pending.count: + description: Number of current pending tasks. This metric is only available if the `TaskCountStatsMonitor` module is included. + unit: "{tasks}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [task_data_source] + enabled: true + apachedruid.task.waiting.count: + description: Number of current waiting tasks. This metric is only available if the `TaskCountStatsMonitor` module is included. + unit: "{tasks}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [task_data_source] + enabled: true + apachedruid.task_slot.total.count: + description: Number of total task slots per emission period. This metric is only available if the `TaskSlotCountStatsMonitor` module is included. + unit: "{slots}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [taskSlot_category] + enabled: true + apachedruid.task_slot.idle.count: + description: Number of idle task slots per emission period. This metric is only available if the `TaskSlotCountStatsMonitor` module is included. + unit: "{slots}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [taskSlot_category] + enabled: true + apachedruid.task_slot.used.count: + description: Number of busy task slots per emission period. This metric is only available if the `TaskSlotCountStatsMonitor` module is included. + unit: "{slots}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [taskSlot_category] + enabled: true + apachedruid.task_slot.lazy.count: + description: Number of total task slots in lazy marked Middle Managers and Indexers per emission period. This metric is only available if the `TaskSlotCountStatsMonitor` module is included. + unit: "{slots}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [taskSlot_category] + enabled: true + apachedruid.task_slot.blacklisted.count: + description: Number of total task slots in blacklisted Middle Managers and Indexers per emission period. This metric is only available if the `TaskSlotCountStatsMonitor` module is included. + unit: "{slots}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [taskSlot_category] + enabled: true + apachedruid.worker.task.failed.count: + description: Number of failed tasks run on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included, and is only supported for Middle Manager nodes. 
+ unit: "{tasks}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [worker_category, worker_version] + enabled: true + apachedruid.worker.task.success.count: + description: Number of successful tasks run on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included, and is only supported for Middle Manager nodes. + unit: "{tasks}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [worker_category, worker_version] + enabled: true + apachedruid.worker.task_slot.idle.count: + description: Number of idle task slots on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included, and is only supported for Middle Manager nodes. + unit: "{slots}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [worker_category, worker_version] + enabled: true + apachedruid.worker.task_slot.total.count: + description: Number of total task slots on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included. + unit: "{slots}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [worker_category, worker_version] + enabled: true + apachedruid.worker.task_slot.used.count: + description: Number of busy task slots on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included. + unit: "{slots}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [worker_category, worker_version] + enabled: true + apachedruid.ingest.shuffle.bytes: + description: Number of bytes shuffled per emission period. + unit: By + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [ingest_supervisor_task_id] + enabled: true + apachedruid.ingest.shuffle.requests: + description: Number of shuffle requests per emission period. + unit: "{requests}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [ingest_supervisor_task_id] + enabled: true + apachedruid.segment.assigned.count: + description: Number of segments assigned to be loaded in the cluster. + unit: "{segments}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [segment_tier, segment_data_source] + enabled: true + apachedruid.segment.moved.count: + description: Number of segments moved in the cluster. + unit: "{segments}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [segment_tier, segment_data_source] + enabled: true + apachedruid.segment.dropped.count: + description: Number of segments chosen to be dropped from the cluster due to being over-replicated. + unit: "{segments}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [segment_tier, segment_data_source] + enabled: true + apachedruid.segment.deleted.count: + description: Number of segments marked as unused due to drop rules. + unit: "{segments}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [segment_data_source] + enabled: true + apachedruid.segment.unneeded.count: + description: Number of segments dropped due to being marked as unused. 
+ unit: "{segments}" + gauge: + value_type: int + attributes: [segment_tier, segment_data_source] + enabled: true + apachedruid.segment.assign_skipped.count: + description: Number of segments that could not be assigned to any server for loading. This can occur due to replication throttling, no available disk space, or a full load queue. + unit: "{segments}" + gauge: + value_type: int + attributes: [segment_description, segment_tier, segment_data_source] + enabled: true + apachedruid.segment.move_skipped.count: + description: Number of segments that were chosen for balancing but could not be moved. This can occur when segments are already optimally placed. + unit: "{segments}" + gauge: + value_type: int + attributes: [segment_description, segment_tier, segment_data_source] + enabled: true + apachedruid.segment.drop_skipped.count: + description: Number of segments that could not be dropped from any server. + unit: "{segments}" + gauge: + value_type: int + attributes: [segment_description, segment_tier, segment_data_source] + enabled: true + apachedruid.segment.load_queue.size: + description: Size in bytes of segments to load. + unit: By + gauge: + value_type: int + attributes: [segment_server] + enabled: true + apachedruid.segment.load_queue.count: + description: Number of segments to load. + unit: "{segments}" + gauge: + value_type: int + attributes: [segment_server] + enabled: true + apachedruid.segment.drop_queue.count: + description: Number of segments to drop. + unit: "{segments}" + gauge: + value_type: int + attributes: [segment_server] + enabled: true + apachedruid.segment.load_queue.assigned: + description: Number of segments assigned for load or drop to the load queue of a server. + unit: "{segments}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [segment_server, segment_data_source] + enabled: true + apachedruid.segment.load_queue.success: + description: Number of segment assignments that completed successfully. + unit: 1 + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [segment_server, segment_data_source] + enabled: true + apachedruid.segment.load_queue.failed: + description: Number of segment assignments that failed to complete. + unit: 1 + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [segment_server, segment_data_source] + enabled: true + apachedruid.segment.load_queue.cancelled: + description: Number of segment assignments that were canceled before completion. + unit: 1 + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [segment_server, segment_data_source] + enabled: true + apachedruid.segment.size: + description: Total size of used segments in a data source. Emitted only for data sources to which at least one used segment belongs. + unit: 1 + gauge: + value_type: int + attributes: [segment_data_source] + enabled: true + apachedruid.segment.count: + description: Number of used segments belonging to a data source. Emitted only for data sources to which at least one used segment belongs. + unit: "{segments}" + gauge: + value_type: int + attributes: [segment_priority, segment_tier, segment_data_source] + enabled: true + apachedruid.segment.over_shadowed.count: + description: Number of segments marked as unused due to being overshadowed. 
+ unit: "{segments}" + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.segment.unavailable.count: + description: Number of unique segments left to load until all used segments are available for queries. + unit: "{segments}" + gauge: + value_type: int + attributes: [segment_data_source] + enabled: true + apachedruid.segment.under_replicated.count: + description: Number of segments, including replicas, left to load until all used segments are available for queries. + unit: "{segments}" + gauge: + value_type: int + attributes: [segment_tier, segment_data_source] + enabled: true + apachedruid.tier.historical.count: + description: Number of available historical nodes in each tier. + unit: 1 + gauge: + value_type: int + attributes: [tier] + enabled: true + apachedruid.tier.replication.factor: + description: Configured maximum replication factor in each tier. + unit: 1 + gauge: + value_type: int + attributes: [tier] + enabled: true + apachedruid.tier.required.capacity: + description: Total capacity in bytes required in each tier. + unit: By + gauge: + value_type: int + attributes: [tier] + enabled: true + apachedruid.tier.total.capacity: + description: Total capacity in bytes available in each tier. + unit: By + gauge: + value_type: int + attributes: [tier] + enabled: true + apachedruid.compact.task.count: + description: Number of tasks issued in the auto compaction run. + unit: "{tasks}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [] + enabled: true + apachedruid.compact_task.max_slot.count: + description: Maximum number of task slots available for auto compaction tasks in the auto compaction run. + unit: "{slots}" + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.compact_task.available_slot.count: + description: Number of available task slots that can be used for auto compaction tasks in the auto compaction run. This is the max number of task slots minus any currently running compaction tasks. + unit: "{slots}" + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.kill_task.available_slot.count: + description: Number of available task slots that can be used for auto kill tasks in the auto kill run. This is the max number of task slots minus any currently running auto kill tasks. + unit: "{slots}" + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.kill_task.max_slot.count: + description: Maximum number of task slots available for auto kill tasks in the auto kill run. + unit: "{slots}" + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.kill.task.count: + description: Number of tasks issued in the auto kill run. + unit: "{tasks}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [] + enabled: true + apachedruid.kill.pending_segments.count: + description: Number of stale pending segments deleted from the metadata store. + unit: "{segments}" + gauge: + value_type: int + attributes: [kill_data_source] + enabled: true + apachedruid.segment.wait_compact.bytes: + description: Total bytes of this datasource waiting to be compacted by the auto compaction (only consider intervals/segments that are eligible for auto compaction). 
+ unit: By + gauge: + value_type: int + attributes: [segment_data_source] + enabled: true + apachedruid.segment.wait_compact.count: + description: Total number of segments of this datasource waiting to be compacted by the auto compaction (only consider intervals/segments that are eligible for auto compaction). + unit: "{segments}" + gauge: + value_type: int + attributes: [segment_data_source] + enabled: true + apachedruid.interval.wait_compact.count: + description: Total number of intervals of this datasource waiting to be compacted by the auto compaction (only consider intervals/segments that are eligible for auto compaction). + unit: "{intervals}" + gauge: + value_type: int + attributes: [interval_data_source] + enabled: true + apachedruid.segment.compacted.bytes: + description: Total bytes of this datasource that are already compacted with the spec set in the auto compaction config. + unit: By + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [segment_data_source] + enabled: true + apachedruid.segment.compacted.count: + description: Total number of segments of this datasource that are already compacted with the spec set in the auto compaction config. + unit: "{segments}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [segment_data_source] + enabled: true + apachedruid.interval.compacted.count: + description: Total number of intervals of this datasource that are already compacted with the spec set in the auto compaction config. + unit: "{intervals}" + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [interval_data_source] + enabled: true + apachedruid.segment.skip_compact.bytes: + description: Total bytes of this datasource that are skipped (not eligible for auto compaction) by the auto compaction. + unit: By + gauge: + value_type: int + attributes: [segment_data_source] + enabled: true + apachedruid.segment.skip_compact.count: + description: Total number of segments of this datasource that are skipped (not eligible for auto compaction) by the auto compaction. + unit: "{segments}" + gauge: + value_type: int + attributes: [segment_data_source] + enabled: true + apachedruid.interval.skip_compact.count: + description: Total number of intervals of this datasource that are skipped (not eligible for auto compaction) by the auto compaction. + unit: "{intervals}" + gauge: + value_type: int + attributes: [interval_data_source] + enabled: true + apachedruid.coordinator.time: + description: Approximate Coordinator duty runtime in milliseconds. + unit: ms + gauge: + value_type: int + attributes: [coordinator_duty] + enabled: true + apachedruid.coordinator.global.time: + description: Approximate runtime of a full coordination cycle in milliseconds. The `dutyGroup` dimension indicates what type of coordination this run was. For example, Historical Management or Indexing. + unit: ms + gauge: + value_type: int + attributes: [coordinator_duty_group] + enabled: true + apachedruid.metadata.kill.supervisor.count: + description: Total number of terminated supervisors that were automatically deleted from metadata store per each Coordinator kill supervisor duty run. This metric can help adjust `druid.coordinator.kill.supervisor.durationToRetain` configuration based on whether more or less terminated supervisors need to be deleted per cycle. This metric is only emitted when `druid.coordinator.kill.supervisor.on` is set to true. 
+ unit: "{supervisors}" + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.metadata.kill.audit.count: + description: Total number of audit logs that were automatically deleted from metadata store per each Coordinator kill audit duty run. This metric can help adjust `druid.coordinator.kill.audit.durationToRetain` configuration based on whether more or less audit logs need to be deleted per cycle. This metric is emitted only when `druid.coordinator.kill.audit.on` is set to true. + unit: 1 + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [] + enabled: true + apachedruid.metadata.kill.compaction.count: + description: Total number of compaction configurations that were automatically deleted from metadata store per each Coordinator kill compaction configuration duty run. This metric is only emitted when `druid.coordinator.kill.compaction.on` is set to true. + unit: 1 + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.metadata.kill.rule.count: + description: Total number of rules that were automatically deleted from metadata store per each Coordinator kill rule duty run. This metric can help adjust `druid.coordinator.kill.rule.durationToRetain` configuration based on whether more or less rules need to be deleted per cycle. This metric is only emitted when `druid.coordinator.kill.rule.on` is set to true. + unit: "{rules}" + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.metadata.kill.datasource.count: + description: Total number of datasource metadata that were automatically deleted from metadata store per each Coordinator kill datasource duty run. Note that datasource metadata only exists for datasource created from supervisor. This metric can help adjust `druid.coordinator.kill.datasource.durationToRetain` configuration based on whether more or less datasource metadata need to be deleted per cycle. This metric is only emitted when `druid.coordinator.kill.datasource.on` is set to true. + unit: 1 + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.segment.max: + description: Maximum byte limit available for segments. + unit: 1 + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.segment.used: + description: Bytes used for served segments. + unit: By + gauge: + value_type: int + attributes: [segment_priority, segment_tier, segment_data_source] + enabled: true + apachedruid.segment.used_percent: + description: Percentage of space used by served segments. + unit: 1.0 + gauge: + value_type: double + attributes: [segment_priority, segment_tier, segment_data_source] + enabled: true + apachedruid.segment.pending_delete: + description: On-disk size in bytes of segments that are waiting to be cleared out. + unit: By + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.segment.row_count.avg: + description: The average number of rows per segment on a historical. `SegmentStatsMonitor` must be enabled. + unit: "{rows}" + gauge: + value_type: int + attributes: [segment_priority, segment_tier, segment_data_source] + enabled: true + apachedruid.segment.row_count.range.count: + description: The number of segments in a bucket. `SegmentStatsMonitor` must be enabled. + unit: "{segments}" + gauge: + value_type: int + attributes: + [segment_priority, segment_tier, segment_data_source, segment_range] + enabled: true + apachedruid.jvm.pool.committed: + description: Committed pool. 
+ unit: By + gauge: + value_type: int + attributes: [jvm_pool_name, jvm_pool_kind] + enabled: true + apachedruid.jvm.pool.init: + description: Initial pool. + unit: By + gauge: + value_type: int + attributes: [jvm_pool_name, jvm_pool_kind] + enabled: true + apachedruid.jvm.pool.max: + description: Max pool. + unit: By + gauge: + value_type: int + attributes: [jvm_pool_name, jvm_pool_kind] + enabled: true + apachedruid.jvm.pool.used: + description: Pool used. + unit: By + gauge: + value_type: int + attributes: [jvm_pool_name, jvm_pool_kind] + enabled: true + apachedruid.jvm.bufferpool.count: + description: Bufferpool count. + unit: 1 + gauge: + value_type: int + attributes: [jvm_bufferpool_name] + enabled: true + apachedruid.jvm.bufferpool.used: + description: Bufferpool used. + unit: 1 + gauge: + value_type: int + attributes: [jvm_bufferpool_name] + enabled: true + apachedruid.jvm.bufferpool.capacity: + description: Bufferpool capacity. + unit: 1 + gauge: + value_type: int + attributes: [jvm_bufferpool_name] + enabled: true + apachedruid.jvm.mem.init: + description: Initial memory. + unit: By + gauge: + value_type: int + attributes: [jvm_mem_kind] + enabled: true + apachedruid.jvm.mem.max: + description: Max memory. + unit: By + gauge: + value_type: int + attributes: [jvm_mem_kind] + enabled: true + apachedruid.jvm.mem.used: + description: Used memory. + unit: By + gauge: + value_type: int + attributes: [jvm_mem_kind] + enabled: true + apachedruid.jvm.mem.committed: + description: Committed memory. + unit: By + gauge: + value_type: int + attributes: [jvm_mem_kind] + enabled: true + apachedruid.jvm.gc.count: + description: Garbage collection count. + unit: 1 + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [jvm_gc_gen, jvm_gc_name] + enabled: true + apachedruid.jvm.gc.cpu: + description: Count of CPU time in Nanoseconds spent on garbage collection. Note, `jvm/gc/cpu` represents the total time over multiple GC cycles; divide by `jvm/gc/count` to get the mean GC time per cycle. + unit: ns + sum: + monotonic: true + aggregation_temporality: delta + value_type: int + attributes: [jvm_gc_gen, jvm_gc_name] + enabled: true + apachedruid.zk.connected: + description: Indicator of connection status. `1` for connected, `0` for disconnected. Emitted once per monitor period. + unit: 1 + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.zk.reconnect.time: + description: Amount of time, in milliseconds, that a server was disconnected from ZooKeeper before reconnecting. Emitted on reconnection. Not emitted if connection to ZooKeeper is permanently lost, because in this case, there is no reconnection. + unit: ms + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.ingest.events.buffered: + description: Number of events queued in the `EventReceiverFirehose` buffer. + unit: "{events}" + gauge: + value_type: int + attributes: + [ + ingest_task_type, + ingest_data_source, + ingest_service_name, + ingest_buffer_capacity, + ingest_task_id, + ] + enabled: true + apachedruid.ingest.bytes.received: + description: Number of bytes received by the `EventReceiverFirehose`. + unit: By + gauge: + value_type: int + attributes: + [ + ingest_task_type, + ingest_task_id, + ingest_data_source, + ingest_service_name, + ] + enabled: true + apachedruid.sys.swap.free: + description: Free swap. + unit: By + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.sys.swap.max: + description: Max swap. 
+ unit: By + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.sys.swap.page_in: + description: Paged in swap. + unit: 1 + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.sys.swap.page_out: + description: Paged out swap. + unit: 1 + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.sys.disk.write.count: + description: Writes to disk. + unit: 1 + gauge: + value_type: int + attributes: [sys_disk_name] + enabled: true + apachedruid.sys.disk.read.count: + description: Reads from disk. + unit: 1 + gauge: + value_type: int + attributes: [sys_disk_name] + enabled: true + apachedruid.sys.disk.write.size: + description: Bytes written to disk. One indicator of the amount of paging occurring for segments. + unit: By + gauge: + value_type: int + attributes: [sys_disk_name] + enabled: true + apachedruid.sys.disk.read.size: + description: Bytes read from disk. One indicator of the amount of paging occurring for segments. + unit: By + gauge: + value_type: int + attributes: [sys_disk_name] + enabled: true + apachedruid.sys.disk.queue: + description: Disk queue length. Measures number of requests waiting to be processed by disk. + unit: 1 + gauge: + value_type: int + attributes: [sys_disk_name] + enabled: true + apachedruid.sys.disk.transfer_time: + description: Transfer time to read from or write to disk. + unit: ms + gauge: + value_type: int + attributes: [sys_disk_name] + enabled: true + apachedruid.sys.net.write.size: + description: Bytes written to the network. + unit: By + gauge: + value_type: int + attributes: [sys_net_hwaddr, sys_net_name, sys_net_address] + enabled: true + apachedruid.sys.net.read.size: + description: Bytes read from the network. + unit: By + gauge: + value_type: int + attributes: [sys_net_hwaddr, sys_net_name, sys_net_address] + enabled: true + apachedruid.sys.net.read.packets: + description: Total packets read from the network. + unit: 1 + gauge: + value_type: int + attributes: [sys_net_hwaddr, sys_net_name, sys_net_address] + enabled: true + apachedruid.sys.net.write.packets: + description: Total packets written to the network. + unit: 1 + gauge: + value_type: int + attributes: [sys_net_hwaddr, sys_net_name, sys_net_address] + enabled: true + apachedruid.sys.net.read.errors: + description: Total network read errors. + unit: 1 + gauge: + value_type: int + attributes: [sys_net_hwaddr, sys_net_name, sys_net_address] + enabled: true + apachedruid.sys.net.write.errors: + description: Total network write errors. + unit: 1 + gauge: + value_type: int + attributes: [sys_net_hwaddr, sys_net_name, sys_net_address] + enabled: true + apachedruid.sys.net.read.dropped: + description: Total packets dropped coming from network. + unit: 1 + gauge: + value_type: int + attributes: [sys_net_hwaddr, sys_net_name, sys_net_address] + enabled: true + apachedruid.sys.net.write.collisions: + description: Total network write collisions. + unit: 1 + gauge: + value_type: int + attributes: [sys_net_hwaddr, sys_net_name, sys_net_address] + enabled: true + apachedruid.sys.fs.used: + description: Filesystem bytes used. + unit: By + gauge: + value_type: int + attributes: [sys_fs_dir_name, sys_fs_dev_name] + enabled: true + apachedruid.sys.fs.max: + description: Filesystem bytes max. + unit: By + gauge: + value_type: int + attributes: [sys_fs_dir_name, sys_fs_dev_name] + enabled: true + apachedruid.sys.fs.files.count: + description: Filesystem total IO nodes. 
+ unit: 1 + gauge: + value_type: int + attributes: [sys_fs_dir_name, sys_fs_dev_name] + enabled: true + apachedruid.sys.fs.files.free: + description: Filesystem free IO nodes. + unit: 1 + gauge: + value_type: int + attributes: [sys_fs_dir_name, sys_fs_dev_name] + enabled: true + apachedruid.sys.mem.used: + description: Memory used. + unit: By + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.sys.mem.max: + description: Memory max. + unit: By + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.sys.mem.free: + description: Memory free. + unit: By + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.sys.storage.used: + description: Disk space used. + unit: 1 + gauge: + value_type: int + attributes: [sys_fs_dir_name] + enabled: true + apachedruid.sys.cpu: + description: CPU used. + unit: 1 + gauge: + value_type: int + attributes: [sys_cpu_time, sys_cpu_name] + enabled: true + apachedruid.sys.uptime: + description: Total system uptime. + unit: s + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.sys.la.1: + description: System CPU load averages over past `i` minutes, where `i={1,5,15}`. + unit: 1 + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.sys.la.5: + description: System CPU load averages over past `i` minutes, where `i={1,5,15}`. + unit: 1 + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.sys.la.15: + description: System CPU load averages over past `i` minutes, where `i={1,5,15}`. + unit: 1 + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.sys.tcpv4.active_opens: + description: Total TCP active open connections. + unit: 1 + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.sys.tcpv4.passive_opens: + description: Total TCP passive open connections. + unit: 1 + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.sys.tcpv4.attempt_fails: + description: Total TCP active connection failures. + unit: 1 + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.sys.tcpv4.estab_resets: + description: Total TCP connection resets. + unit: 1 + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.sys.tcpv4.in.segs: + description: Total segments received in connection. + unit: 1 + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.sys.tcpv4.in.errs: + description: Errors while reading segments. + unit: 1 + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.sys.tcpv4.out.segs: + description: Total segments sent. + unit: 1 + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.sys.tcpv4.out.rsts: + description: Total `out reset` packets sent to reset the connection. + unit: 1 + gauge: + value_type: int + attributes: [] + enabled: true + apachedruid.sys.tcpv4.retrans.segs: + description: Total segments re-transmitted. 
+ unit: 1 + gauge: + value_type: int + attributes: [] + enabled: true diff --git a/receiver/apachedruidreceiver/receiver.go b/receiver/apachedruidreceiver/receiver.go new file mode 100644 index 0000000000000..a8b513fa58d66 --- /dev/null +++ b/receiver/apachedruidreceiver/receiver.go @@ -0,0 +1,27 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package apachedruidreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachedruidreceiver" + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/receiver" +) + +type metricsReceiver struct { +} + +func newMetricsReceiver(config *Config, settings receiver.CreateSettings, nextConsumer consumer.Metrics) (*metricsReceiver, error) { + return &metricsReceiver{}, nil +} + +func (r *metricsReceiver) Start(_ context.Context, host component.Host) error { + return nil +} + +func (r *metricsReceiver) Shutdown(_ context.Context) error { + return nil +} diff --git a/receiver/apachedruidreceiver/receiver_test.go b/receiver/apachedruidreceiver/receiver_test.go new file mode 100644 index 0000000000000..dbbc980df3644 --- /dev/null +++ b/receiver/apachedruidreceiver/receiver_test.go @@ -0,0 +1,55 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package apachedruidreceiver + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/receiver/receivertest" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/testutil" +) + +func TestWriteLineProtocol_v2API(t *testing.T) { + addr := testutil.GetAvailableLocalAddress(t) + config := &Config{ + HTTPServerSettings: confighttp.HTTPServerSettings{ + Endpoint: addr, + }, + } + nextConsumer := new(mockConsumer) + + receiver, outerErr := NewFactory().CreateMetricsReceiver(context.Background(), receivertest.NewNopCreateSettings(), config, nextConsumer) + require.NoError(t, outerErr) + require.NotNil(t, receiver) + + require.NoError(t, receiver.Start(context.Background(), componenttest.NewNopHost())) + t.Cleanup(func() { require.NoError(t, receiver.Shutdown(context.Background())) }) + + t.Run("Apache-Druid-HTTP-emitter-client", func(t *testing.T) { + nextConsumer.lastMetricsConsumed = pmetric.NewMetrics() + + }) + +} + +type mockConsumer struct { + lastMetricsConsumed pmetric.Metrics +} + +func (m *mockConsumer) Capabilities() consumer.Capabilities { + return consumer.Capabilities{MutatesData: false} +} + +func (m *mockConsumer) ConsumeMetrics(_ context.Context, md pmetric.Metrics) error { + m.lastMetricsConsumed = pmetric.NewMetrics() + md.CopyTo(m.lastMetricsConsumed) + return nil +} diff --git a/versions.yaml b/versions.yaml index 2674ea8757c15..f721ec7eace7b 100644 --- a/versions.yaml +++ b/versions.yaml @@ -170,6 +170,7 @@ module-sets: - github.com/open-telemetry/opentelemetry-collector-contrib/processor/remotetapprocessor - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/activedirectorydsreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/aerospikereceiver + - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachedruidreceiver - 
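For reference, the Druid HTTP emitter that this receiver is meant to accept data from POSTs batches of JSON events to the configured recipient URL. The snippet below is an illustrative sketch of such a payload (values invented; field names follow the Druid emitter/metrics documentation, and the exact dimension set varies per metric): the raw Druid metric name (here `segment/used`) and its dimensions (`dataSource`, `tier`, `priority`) are what the `apachedruid.*` metric names and `segment_*` attributes defined in metadata.yaml above correspond to. This example is not part of the patch itself.

```json
[
  {
    "feed": "metrics",
    "timestamp": "2024-03-18T07:23:59.000Z",
    "service": "druid/historical",
    "host": "druid-historical-0:8083",
    "version": "28.0.0",
    "metric": "segment/used",
    "value": 104857600,
    "dataSource": "wikipedia",
    "tier": "_default_tier",
    "priority": "0"
  }
]
```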
github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachereceiver - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachesparkreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscloudwatchmetricsreceiver From a116ddc727f83405a6a10d7aee9ecdd076613df1 Mon Sep 17 00:00:00 2001 From: Yuanli Han Date: Wed, 20 Mar 2024 09:56:44 +0800 Subject: [PATCH 2/8] Update receiver/apachedruidreceiver/metadata.yaml Co-authored-by: Antoine Toulme --- receiver/apachedruidreceiver/metadata.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/receiver/apachedruidreceiver/metadata.yaml b/receiver/apachedruidreceiver/metadata.yaml index 4542eeb8cdb8b..03026344e8eae 100644 --- a/receiver/apachedruidreceiver/metadata.yaml +++ b/receiver/apachedruidreceiver/metadata.yaml @@ -4,7 +4,7 @@ status: class: receiver stability: development: [metrics, logs] - distributions: [contrib, observiq, sumo] + distributions: [] codeowners: active: [yuanlihan] From 930443ba4eeadf9cb056292b46a635ae135f250a Mon Sep 17 00:00:00 2001 From: Yuanli Han Date: Wed, 20 Mar 2024 09:56:57 +0800 Subject: [PATCH 3/8] Update receiver/apachedruidreceiver/metadata.yaml Co-authored-by: Curtis Robert --- receiver/apachedruidreceiver/metadata.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/receiver/apachedruidreceiver/metadata.yaml b/receiver/apachedruidreceiver/metadata.yaml index 03026344e8eae..03b9cc895a3d6 100644 --- a/receiver/apachedruidreceiver/metadata.yaml +++ b/receiver/apachedruidreceiver/metadata.yaml @@ -6,7 +6,7 @@ status: development: [metrics, logs] distributions: [] codeowners: - active: [yuanlihan] + active: [yuanlihan, atoulme] resource_attributes: apachedruid.cluster.name: From d2b730e57842dee1158ceae4565bbff34850f78f Mon Sep 17 00:00:00 2001 From: Yuanli Han Date: Thu, 21 Mar 2024 16:28:29 +0800 Subject: [PATCH 4/8] add goleak check and update code owners --- .github/CODEOWNERS | 1 + receiver/apachedruidreceiver/README.md | 7 ++----- receiver/apachedruidreceiver/go.mod | 1 + .../internal/metadata/package_test.go | 14 ++++++++++++++ receiver/apachedruidreceiver/metadata.yaml | 2 +- receiver/apachedruidreceiver/package_test.go | 14 ++++++++++++++ 6 files changed, 33 insertions(+), 6 deletions(-) create mode 100644 receiver/apachedruidreceiver/internal/metadata/package_test.go create mode 100644 receiver/apachedruidreceiver/package_test.go diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index ef3f4316fc33e..5750726ba889f 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -180,6 +180,7 @@ processor/transformprocessor/ @open-telemetry/collect receiver/activedirectorydsreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @BinaryFissionGames receiver/aerospikereceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @antonblock +receiver/apachedruidreceiver/ @open-telemetry/collector-contrib-approvers @atoulme @yuanlihan receiver/apachereceiver/ @open-telemetry/collector-contrib-approvers @djaglowski receiver/apachesparkreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @Caleb-Hurshman @mrsillydog receiver/awscloudwatchmetricsreceiver/ @open-telemetry/collector-contrib-approvers @jpkrohling diff --git a/receiver/apachedruidreceiver/README.md b/receiver/apachedruidreceiver/README.md index ca638b75263fa..954453d8e30c4 100644 --- a/receiver/apachedruidreceiver/README.md +++ b/receiver/apachedruidreceiver/README.md @@ -4,14 +4,11 @@ | Status | | | ------------- |-----------| | Stability | 
[development]: metrics, logs | -| Distributions | [contrib], [observiq], [sumo] | +| Distributions | [] | | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Areceiver%2Fapachedruid%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Areceiver%2Fapachedruid) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Areceiver%2Fapachedruid%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Areceiver%2Fapachedruid) | -| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@yuanlihan](https://www.github.com/yuanlihan) | +| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@atoulme](https://www.github.com/atoulme), [@yuanlihan](https://www.github.com/yuanlihan) | [development]: https://github.com/open-telemetry/opentelemetry-collector#development -[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib -[observiq]: https://github.com/observIQ/observiq-otel-collector -[sumo]: https://github.com/SumoLogic/sumologic-otel-collector This receiver accepts [metrics](https://druid.apache.org/docs/latest/operations/metrics) data from the [HTTP Emitter](https://druid.apache.org/docs/latest/configuration/#http-emitter-module) of Apache Druid. diff --git a/receiver/apachedruidreceiver/go.mod b/receiver/apachedruidreceiver/go.mod index 1921d588dcd0d..517bd888fd6dd 100644 --- a/receiver/apachedruidreceiver/go.mod +++ b/receiver/apachedruidreceiver/go.mod @@ -14,6 +14,7 @@ require ( go.opentelemetry.io/collector/receiver v0.90.0 go.opentelemetry.io/otel/metric v1.21.0 go.opentelemetry.io/otel/trace v1.21.0 + go.uber.org/goleak v1.3.0 go.uber.org/zap v1.27.0 ) diff --git a/receiver/apachedruidreceiver/internal/metadata/package_test.go b/receiver/apachedruidreceiver/internal/metadata/package_test.go new file mode 100644 index 0000000000000..1aba5ec4bb0b5 --- /dev/null +++ b/receiver/apachedruidreceiver/internal/metadata/package_test.go @@ -0,0 +1,14 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metadata + +import ( + "testing" + + "go.uber.org/goleak" +) + +func TestMain(m *testing.M) { + goleak.VerifyTestMain(m) +} diff --git a/receiver/apachedruidreceiver/metadata.yaml b/receiver/apachedruidreceiver/metadata.yaml index 03b9cc895a3d6..18c0c82067736 100644 --- a/receiver/apachedruidreceiver/metadata.yaml +++ b/receiver/apachedruidreceiver/metadata.yaml @@ -6,7 +6,7 @@ status: development: [metrics, logs] distributions: [] codeowners: - active: [yuanlihan, atoulme] + active: [atoulme, yuanlihan] resource_attributes: apachedruid.cluster.name: diff --git a/receiver/apachedruidreceiver/package_test.go b/receiver/apachedruidreceiver/package_test.go new file mode 100644 index 0000000000000..c21328a45eaf8 --- /dev/null +++ b/receiver/apachedruidreceiver/package_test.go @@ -0,0 +1,14 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package apachedruidreceiver + +import ( + "testing" + + "go.uber.org/goleak" +) + +func 
TestMain(m *testing.M) { + goleak.VerifyTestMain(m) +} From eb545b21a198a700a4302dda2c1f1d309afac889 Mon Sep 17 00:00:00 2001 From: Yuanli Han Date: Mon, 25 Mar 2024 15:29:17 +0800 Subject: [PATCH 5/8] Update receiver/apachedruidreceiver/README.md Co-authored-by: Antoine Toulme --- receiver/apachedruidreceiver/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/receiver/apachedruidreceiver/README.md b/receiver/apachedruidreceiver/README.md index 954453d8e30c4..301cf15da382d 100644 --- a/receiver/apachedruidreceiver/README.md +++ b/receiver/apachedruidreceiver/README.md @@ -21,7 +21,7 @@ Apache Druid cluster needs to be configured to enable metrics, see [Enabling Met The following configuration options are supported: -- `endpoint` (default = 0.0.0.0:9000) HTTP service endpoint for the line protocol receiver +- `endpoint` (default = localhost:9000) HTTP service endpoint of the Apache Druid receiver - `metrics_path` (default = `/services/collector/metrics`) The path accepting Apache Druid metrics. - `logs_path` (default = `/services/collector/logs`) The path accepting Apache Druid logs. - `cluster_name` (default = `default`) The default name of Druid cluster. Note that the [HTTP Emitter](https://druid.apache.org/docs/latest/configuration/#http-emitter-module) of Apache Druid `28.0.0` doesn't include cluster name information in metrics. From 46002dc4e1812508fc36a8977383f049aadc8472 Mon Sep 17 00:00:00 2001 From: Yuanli Han Date: Mon, 25 Mar 2024 15:56:57 +0800 Subject: [PATCH 6/8] remove metrics descriptions from the first PR as suggested --- receiver/apachedruidreceiver/README.md | 2 +- receiver/apachedruidreceiver/config.go | 2 +- receiver/apachedruidreceiver/factory.go | 4 +- .../generated_component_test.go | 7 - receiver/apachedruidreceiver/go.mod | 5 +- receiver/apachedruidreceiver/go.sum | 4 +- .../internal/metadata/generated_config.go | 1066 -- .../metadata/generated_config_test.go | 606 - .../internal/metadata/generated_metrics.go | 14859 ---------------- .../metadata/generated_metrics_test.go | 5541 ------ .../internal/metadata/generated_resource.go | 50 - .../metadata/generated_resource_test.go | 52 - .../internal/metadata/generated_status.go | 1 - .../internal/metadata/package_test.go | 14 - .../internal/metadata/testdata/config.yaml | 999 -- receiver/apachedruidreceiver/metadata.yaml | 2534 +-- receiver/apachedruidreceiver/receiver_test.go | 2 +- 17 files changed, 10 insertions(+), 25738 deletions(-) delete mode 100644 receiver/apachedruidreceiver/internal/metadata/generated_config.go delete mode 100644 receiver/apachedruidreceiver/internal/metadata/generated_config_test.go delete mode 100644 receiver/apachedruidreceiver/internal/metadata/generated_metrics.go delete mode 100644 receiver/apachedruidreceiver/internal/metadata/generated_metrics_test.go delete mode 100644 receiver/apachedruidreceiver/internal/metadata/generated_resource.go delete mode 100644 receiver/apachedruidreceiver/internal/metadata/generated_resource_test.go delete mode 100644 receiver/apachedruidreceiver/internal/metadata/package_test.go delete mode 100644 receiver/apachedruidreceiver/internal/metadata/testdata/config.yaml diff --git a/receiver/apachedruidreceiver/README.md b/receiver/apachedruidreceiver/README.md index 301cf15da382d..290f63dc951ae 100644 --- a/receiver/apachedruidreceiver/README.md +++ b/receiver/apachedruidreceiver/README.md @@ -3,7 +3,7 @@ | Status | | | ------------- |-----------| -| Stability | [development]: metrics, logs | +| Stability | [development]: metrics |
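To make the configuration options above concrete, the following is a minimal, illustrative collector configuration that wires this receiver into a metrics pipeline using the documented defaults. The component type `apachedruid`, the `debug` exporter, and the Druid-side properties mentioned afterwards are assumptions for the example, not content of this patch.

```yaml
receivers:
  apachedruid:
    endpoint: localhost:9000
    metrics_path: /services/collector/metrics
    cluster_name: default

exporters:
  debug:

service:
  pipelines:
    metrics:
      receivers: [apachedruid]
      exporters: [debug]
```

On the Druid side, the HTTP Emitter module referenced in the README would then be pointed at this endpoint, for example `druid.emitter=http` and `druid.emitter.http.recipientBaseUrl=http://<collector-host>:9000/services/collector/metrics` (property names per the Druid documentation; adjust host and path to the deployment).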
| Distributions | [] | | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Areceiver%2Fapachedruid%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Areceiver%2Fapachedruid) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Areceiver%2Fapachedruid%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Areceiver%2Fapachedruid) | | [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@atoulme](https://www.github.com/atoulme), [@yuanlihan](https://www.github.com/yuanlihan) | diff --git a/receiver/apachedruidreceiver/config.go b/receiver/apachedruidreceiver/config.go index 5f5acec4f93c7..2e4367042b43a 100644 --- a/receiver/apachedruidreceiver/config.go +++ b/receiver/apachedruidreceiver/config.go @@ -9,7 +9,7 @@ import ( // Config defines configuration for the Apache Druid receiver. type Config struct { - confighttp.HTTPServerSettings `mapstructure:",squash"` + confighttp.ServerConfig `mapstructure:",squash"` // MetricsPath for metrics data collection, default is '/services/collector/metrics' MetricsPath string `mapstructure:"metrics_path"` diff --git a/receiver/apachedruidreceiver/factory.go b/receiver/apachedruidreceiver/factory.go index 8b6e02a80c1e6..ca3a6d030b036 100644 --- a/receiver/apachedruidreceiver/factory.go +++ b/receiver/apachedruidreceiver/factory.go @@ -24,8 +24,8 @@ func NewFactory() receiver.Factory { // createDefaultConfig creates the default configuration for receiver. 
func createDefaultConfig() component.Config { return &Config{ - HTTPServerSettings: confighttp.HTTPServerSettings{ - Endpoint: "0.0.0.0:9000", + ServerConfig: confighttp.ServerConfig{ + Endpoint: "localhost:9000", }, MetricsPath: "/services/collector/metrics", ClusterName: "default", diff --git a/receiver/apachedruidreceiver/generated_component_test.go b/receiver/apachedruidreceiver/generated_component_test.go index 0d79e7456cb5e..9cd74d422b5be 100644 --- a/receiver/apachedruidreceiver/generated_component_test.go +++ b/receiver/apachedruidreceiver/generated_component_test.go @@ -23,13 +23,6 @@ func TestComponentLifecycle(t *testing.T) { createFn func(ctx context.Context, set receiver.CreateSettings, cfg component.Config) (component.Component, error) }{ - { - name: "logs", - createFn: func(ctx context.Context, set receiver.CreateSettings, cfg component.Config) (component.Component, error) { - return factory.CreateLogsReceiver(ctx, set, cfg, consumertest.NewNop()) - }, - }, - { name: "metrics", createFn: func(ctx context.Context, set receiver.CreateSettings, cfg component.Config) (component.Component, error) { diff --git a/receiver/apachedruidreceiver/go.mod b/receiver/apachedruidreceiver/go.mod index 517bd888fd6dd..3e7dc507cdd6b 100644 --- a/receiver/apachedruidreceiver/go.mod +++ b/receiver/apachedruidreceiver/go.mod @@ -3,7 +3,6 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apache go 1.21 require ( - github.com/google/go-cmp v0.6.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.90.0 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.90.0 @@ -14,7 +14,6 @@ require ( go.opentelemetry.io/collector/receiver v0.90.0 go.opentelemetry.io/otel/metric v1.21.0 go.opentelemetry.io/otel/trace v1.21.0 go.uber.org/goleak v1.3.0 - go.uber.org/zap v1.27.0 ) require ( @@ -49,10 +47,11 @@ require ( go.opentelemetry.io/collector/config/internal v0.90.0 // indirect go.opentelemetry.io/collector/extension v0.90.0 // indirect go.opentelemetry.io/collector/extension/auth v0.90.0 // indirect - go.opentelemetry.io/collector/featuregate v1.3.1-0.20240306115632-b2693620eff6 // indirect + go.opentelemetry.io/collector/featuregate v1.3.1-0.20240315172937-3b5aee0c7a16 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect go.opentelemetry.io/otel v1.21.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/net v0.18.0 // indirect golang.org/x/sys v0.14.0 // indirect golang.org/x/text v0.14.0 // indirect diff --git a/receiver/apachedruidreceiver/go.sum b/receiver/apachedruidreceiver/go.sum index d4741a4bffedf..fd48b4070e91d 100644 --- a/receiver/apachedruidreceiver/go.sum +++ b/receiver/apachedruidreceiver/go.sum @@ -88,8 +88,8 @@ go.opentelemetry.io/collector/extension v0.90.0 h1:NDvZneZEapDeOD195kDZiEW8IUb2S go.opentelemetry.io/collector/extension v0.90.0/go.mod h1:vUiLcJQuM04CuyCf6AbjW8OCSeINSU4242GPVzTzX9w= go.opentelemetry.io/collector/extension/auth v0.90.0 h1:L5UfHQ0jXMllC7nB4l9EAXeAEExlsvwJOr22sB+55Cs= go.opentelemetry.io/collector/extension/auth v0.90.0/go.mod h1:x/U5M+J3Xjmcec94j3v79s8vjsLMaUrN5abjcal0sEw= -go.opentelemetry.io/collector/featuregate v1.3.1-0.20240306115632-b2693620eff6 h1:WPX5pMQgNPvjLrtQ+XoBBsbyhy1m1JtYc1B/rIFhCnQ= -go.opentelemetry.io/collector/featuregate v1.3.1-0.20240306115632-b2693620eff6/go.mod h1:w7nUODKxEi3FLf1HslCiE6YWtMtOOrMnSwsDam8Mg9w= +go.opentelemetry.io/collector/featuregate v1.3.1-0.20240315172937-3b5aee0c7a16
h1:6H0vZiRXlvvob+ejs59g6iTSat2DkuB5RCvL71lhzIg= +go.opentelemetry.io/collector/featuregate v1.3.1-0.20240315172937-3b5aee0c7a16/go.mod h1:w7nUODKxEi3FLf1HslCiE6YWtMtOOrMnSwsDam8Mg9w= go.opentelemetry.io/collector/pdata v1.0.0 h1:ECP2jnLztewsHmL1opL8BeMtWVc7/oSlKNhfY9jP8ec= go.opentelemetry.io/collector/pdata v1.0.0/go.mod h1:TsDFgs4JLNG7t6x9D8kGswXUz4mme+MyNChHx8zSF6k= go.opentelemetry.io/collector/receiver v0.90.0 h1:cVp1s9c9kSfn5ZTXb9o8nlZnLEgs2gutEYzty5+eUEI= diff --git a/receiver/apachedruidreceiver/internal/metadata/generated_config.go b/receiver/apachedruidreceiver/internal/metadata/generated_config.go deleted file mode 100644 index 34b1120429ebc..0000000000000 --- a/receiver/apachedruidreceiver/internal/metadata/generated_config.go +++ /dev/null @@ -1,1066 +0,0 @@ -// Code generated by mdatagen. DO NOT EDIT. - -package metadata - -import "go.opentelemetry.io/collector/confmap" - -// MetricConfig provides common config for a particular metric. -type MetricConfig struct { - Enabled bool `mapstructure:"enabled"` - - enabledSetByUser bool -} - -func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error { - if parser == nil { - return nil - } - err := parser.Unmarshal(ms) - if err != nil { - return err - } - ms.enabledSetByUser = parser.IsSet("enabled") - return nil -} - -// MetricsConfig provides config for apachedruid metrics. -type MetricsConfig struct { - ApachedruidCompactSegmentAnalyzerFetchAndProcessMillis MetricConfig `mapstructure:"apachedruid.compact.segment_analyzer.fetch_and_process_millis"` - ApachedruidCompactTaskCount MetricConfig `mapstructure:"apachedruid.compact.task.count"` - ApachedruidCompactTaskAvailableSlotCount MetricConfig `mapstructure:"apachedruid.compact_task.available_slot.count"` - ApachedruidCompactTaskMaxSlotCount MetricConfig `mapstructure:"apachedruid.compact_task.max_slot.count"` - ApachedruidCoordinatorGlobalTime MetricConfig `mapstructure:"apachedruid.coordinator.global.time"` - ApachedruidCoordinatorTime MetricConfig `mapstructure:"apachedruid.coordinator.time"` - ApachedruidIngestBytesReceived MetricConfig `mapstructure:"apachedruid.ingest.bytes.received"` - ApachedruidIngestCount MetricConfig `mapstructure:"apachedruid.ingest.count"` - ApachedruidIngestEventsBuffered MetricConfig `mapstructure:"apachedruid.ingest.events.buffered"` - ApachedruidIngestEventsDuplicate MetricConfig `mapstructure:"apachedruid.ingest.events.duplicate"` - ApachedruidIngestEventsMessageGap MetricConfig `mapstructure:"apachedruid.ingest.events.message_gap"` - ApachedruidIngestEventsProcessed MetricConfig `mapstructure:"apachedruid.ingest.events.processed"` - ApachedruidIngestEventsProcessedWithError MetricConfig `mapstructure:"apachedruid.ingest.events.processed_with_error"` - ApachedruidIngestEventsThrownAway MetricConfig `mapstructure:"apachedruid.ingest.events.thrown_away"` - ApachedruidIngestEventsUnparseable MetricConfig `mapstructure:"apachedruid.ingest.events.unparseable"` - ApachedruidIngestHandoffCount MetricConfig `mapstructure:"apachedruid.ingest.handoff.count"` - ApachedruidIngestHandoffFailed MetricConfig `mapstructure:"apachedruid.ingest.handoff.failed"` - ApachedruidIngestHandoffTime MetricConfig `mapstructure:"apachedruid.ingest.handoff.time"` - ApachedruidIngestInputBytes MetricConfig `mapstructure:"apachedruid.ingest.input.bytes"` - ApachedruidIngestKafkaAvgLag MetricConfig `mapstructure:"apachedruid.ingest.kafka.avg_lag"` - ApachedruidIngestKafkaLag MetricConfig `mapstructure:"apachedruid.ingest.kafka.lag"` - ApachedruidIngestKafkaMaxLag MetricConfig 
`mapstructure:"apachedruid.ingest.kafka.max_lag"` - ApachedruidIngestKafkaPartitionLag MetricConfig `mapstructure:"apachedruid.ingest.kafka.partition_lag"` - ApachedruidIngestKinesisAvgLagTime MetricConfig `mapstructure:"apachedruid.ingest.kinesis.avg_lag.time"` - ApachedruidIngestKinesisLagTime MetricConfig `mapstructure:"apachedruid.ingest.kinesis.lag.time"` - ApachedruidIngestKinesisMaxLagTime MetricConfig `mapstructure:"apachedruid.ingest.kinesis.max_lag.time"` - ApachedruidIngestKinesisPartitionLagTime MetricConfig `mapstructure:"apachedruid.ingest.kinesis.partition_lag.time"` - ApachedruidIngestMergeCPU MetricConfig `mapstructure:"apachedruid.ingest.merge.cpu"` - ApachedruidIngestMergeTime MetricConfig `mapstructure:"apachedruid.ingest.merge.time"` - ApachedruidIngestNoticesQueueSize MetricConfig `mapstructure:"apachedruid.ingest.notices.queue_size"` - ApachedruidIngestNoticesTime MetricConfig `mapstructure:"apachedruid.ingest.notices.time"` - ApachedruidIngestPauseTime MetricConfig `mapstructure:"apachedruid.ingest.pause.time"` - ApachedruidIngestPersistsBackPressure MetricConfig `mapstructure:"apachedruid.ingest.persists.back_pressure"` - ApachedruidIngestPersistsCount MetricConfig `mapstructure:"apachedruid.ingest.persists.count"` - ApachedruidIngestPersistsCPU MetricConfig `mapstructure:"apachedruid.ingest.persists.cpu"` - ApachedruidIngestPersistsFailed MetricConfig `mapstructure:"apachedruid.ingest.persists.failed"` - ApachedruidIngestPersistsTime MetricConfig `mapstructure:"apachedruid.ingest.persists.time"` - ApachedruidIngestRowsOutput MetricConfig `mapstructure:"apachedruid.ingest.rows.output"` - ApachedruidIngestSegmentsCount MetricConfig `mapstructure:"apachedruid.ingest.segments.count"` - ApachedruidIngestShuffleBytes MetricConfig `mapstructure:"apachedruid.ingest.shuffle.bytes"` - ApachedruidIngestShuffleRequests MetricConfig `mapstructure:"apachedruid.ingest.shuffle.requests"` - ApachedruidIngestSinkCount MetricConfig `mapstructure:"apachedruid.ingest.sink.count"` - ApachedruidIngestTombstonesCount MetricConfig `mapstructure:"apachedruid.ingest.tombstones.count"` - ApachedruidIntervalCompactedCount MetricConfig `mapstructure:"apachedruid.interval.compacted.count"` - ApachedruidIntervalSkipCompactCount MetricConfig `mapstructure:"apachedruid.interval.skip_compact.count"` - ApachedruidIntervalWaitCompactCount MetricConfig `mapstructure:"apachedruid.interval.wait_compact.count"` - ApachedruidJettyNumOpenConnections MetricConfig `mapstructure:"apachedruid.jetty.num_open_connections"` - ApachedruidJettyThreadPoolBusy MetricConfig `mapstructure:"apachedruid.jetty.thread_pool.busy"` - ApachedruidJettyThreadPoolIdle MetricConfig `mapstructure:"apachedruid.jetty.thread_pool.idle"` - ApachedruidJettyThreadPoolIsLowOnThreads MetricConfig `mapstructure:"apachedruid.jetty.thread_pool.is_low_on_threads"` - ApachedruidJettyThreadPoolMax MetricConfig `mapstructure:"apachedruid.jetty.thread_pool.max"` - ApachedruidJettyThreadPoolMin MetricConfig `mapstructure:"apachedruid.jetty.thread_pool.min"` - ApachedruidJettyThreadPoolQueueSize MetricConfig `mapstructure:"apachedruid.jetty.thread_pool.queue_size"` - ApachedruidJettyThreadPoolTotal MetricConfig `mapstructure:"apachedruid.jetty.thread_pool.total"` - ApachedruidJvmBufferpoolCapacity MetricConfig `mapstructure:"apachedruid.jvm.bufferpool.capacity"` - ApachedruidJvmBufferpoolCount MetricConfig `mapstructure:"apachedruid.jvm.bufferpool.count"` - ApachedruidJvmBufferpoolUsed MetricConfig `mapstructure:"apachedruid.jvm.bufferpool.used"` - 
ApachedruidJvmGcCount MetricConfig `mapstructure:"apachedruid.jvm.gc.count"` - ApachedruidJvmGcCPU MetricConfig `mapstructure:"apachedruid.jvm.gc.cpu"` - ApachedruidJvmMemCommitted MetricConfig `mapstructure:"apachedruid.jvm.mem.committed"` - ApachedruidJvmMemInit MetricConfig `mapstructure:"apachedruid.jvm.mem.init"` - ApachedruidJvmMemMax MetricConfig `mapstructure:"apachedruid.jvm.mem.max"` - ApachedruidJvmMemUsed MetricConfig `mapstructure:"apachedruid.jvm.mem.used"` - ApachedruidJvmPoolCommitted MetricConfig `mapstructure:"apachedruid.jvm.pool.committed"` - ApachedruidJvmPoolInit MetricConfig `mapstructure:"apachedruid.jvm.pool.init"` - ApachedruidJvmPoolMax MetricConfig `mapstructure:"apachedruid.jvm.pool.max"` - ApachedruidJvmPoolUsed MetricConfig `mapstructure:"apachedruid.jvm.pool.used"` - ApachedruidKillPendingSegmentsCount MetricConfig `mapstructure:"apachedruid.kill.pending_segments.count"` - ApachedruidKillTaskCount MetricConfig `mapstructure:"apachedruid.kill.task.count"` - ApachedruidKillTaskAvailableSlotCount MetricConfig `mapstructure:"apachedruid.kill_task.available_slot.count"` - ApachedruidKillTaskMaxSlotCount MetricConfig `mapstructure:"apachedruid.kill_task.max_slot.count"` - ApachedruidMergeBufferPendingRequests MetricConfig `mapstructure:"apachedruid.merge_buffer.pending_requests"` - ApachedruidMetadataKillAuditCount MetricConfig `mapstructure:"apachedruid.metadata.kill.audit.count"` - ApachedruidMetadataKillCompactionCount MetricConfig `mapstructure:"apachedruid.metadata.kill.compaction.count"` - ApachedruidMetadataKillDatasourceCount MetricConfig `mapstructure:"apachedruid.metadata.kill.datasource.count"` - ApachedruidMetadataKillRuleCount MetricConfig `mapstructure:"apachedruid.metadata.kill.rule.count"` - ApachedruidMetadataKillSupervisorCount MetricConfig `mapstructure:"apachedruid.metadata.kill.supervisor.count"` - ApachedruidMetadatacacheInitTime MetricConfig `mapstructure:"apachedruid.metadatacache.init.time"` - ApachedruidMetadatacacheRefreshCount MetricConfig `mapstructure:"apachedruid.metadatacache.refresh.count"` - ApachedruidMetadatacacheRefreshTime MetricConfig `mapstructure:"apachedruid.metadatacache.refresh.time"` - ApachedruidQueryByteLimitExceededCount MetricConfig `mapstructure:"apachedruid.query.byte_limit.exceeded.count"` - ApachedruidQueryBytes MetricConfig `mapstructure:"apachedruid.query.bytes"` - ApachedruidQueryCacheDeltaAverageBytes MetricConfig `mapstructure:"apachedruid.query.cache.delta.average_bytes"` - ApachedruidQueryCacheDeltaErrors MetricConfig `mapstructure:"apachedruid.query.cache.delta.errors"` - ApachedruidQueryCacheDeltaEvictions MetricConfig `mapstructure:"apachedruid.query.cache.delta.evictions"` - ApachedruidQueryCacheDeltaHitRate MetricConfig `mapstructure:"apachedruid.query.cache.delta.hit_rate"` - ApachedruidQueryCacheDeltaHits MetricConfig `mapstructure:"apachedruid.query.cache.delta.hits"` - ApachedruidQueryCacheDeltaMisses MetricConfig `mapstructure:"apachedruid.query.cache.delta.misses"` - ApachedruidQueryCacheDeltaNumEntries MetricConfig `mapstructure:"apachedruid.query.cache.delta.num_entries"` - ApachedruidQueryCacheDeltaPutError MetricConfig `mapstructure:"apachedruid.query.cache.delta.put.error"` - ApachedruidQueryCacheDeltaPutOk MetricConfig `mapstructure:"apachedruid.query.cache.delta.put.ok"` - ApachedruidQueryCacheDeltaPutOversized MetricConfig `mapstructure:"apachedruid.query.cache.delta.put.oversized"` - ApachedruidQueryCacheDeltaSizeBytes MetricConfig 
`mapstructure:"apachedruid.query.cache.delta.size_bytes"` - ApachedruidQueryCacheDeltaTimeouts MetricConfig `mapstructure:"apachedruid.query.cache.delta.timeouts"` - ApachedruidQueryCacheMemcachedDelta MetricConfig `mapstructure:"apachedruid.query.cache.memcached.delta"` - ApachedruidQueryCacheMemcachedTotal MetricConfig `mapstructure:"apachedruid.query.cache.memcached.total"` - ApachedruidQueryCacheTotalAverageBytes MetricConfig `mapstructure:"apachedruid.query.cache.total.average_bytes"` - ApachedruidQueryCacheTotalErrors MetricConfig `mapstructure:"apachedruid.query.cache.total.errors"` - ApachedruidQueryCacheTotalEvictions MetricConfig `mapstructure:"apachedruid.query.cache.total.evictions"` - ApachedruidQueryCacheTotalHitRate MetricConfig `mapstructure:"apachedruid.query.cache.total.hit_rate"` - ApachedruidQueryCacheTotalHits MetricConfig `mapstructure:"apachedruid.query.cache.total.hits"` - ApachedruidQueryCacheTotalMisses MetricConfig `mapstructure:"apachedruid.query.cache.total.misses"` - ApachedruidQueryCacheTotalNumEntries MetricConfig `mapstructure:"apachedruid.query.cache.total.num_entries"` - ApachedruidQueryCacheTotalPutError MetricConfig `mapstructure:"apachedruid.query.cache.total.put.error"` - ApachedruidQueryCacheTotalPutOk MetricConfig `mapstructure:"apachedruid.query.cache.total.put.ok"` - ApachedruidQueryCacheTotalPutOversized MetricConfig `mapstructure:"apachedruid.query.cache.total.put.oversized"` - ApachedruidQueryCacheTotalSizeBytes MetricConfig `mapstructure:"apachedruid.query.cache.total.size_bytes"` - ApachedruidQueryCacheTotalTimeouts MetricConfig `mapstructure:"apachedruid.query.cache.total.timeouts"` - ApachedruidQueryCount MetricConfig `mapstructure:"apachedruid.query.count"` - ApachedruidQueryCPUTime MetricConfig `mapstructure:"apachedruid.query.cpu.time"` - ApachedruidQueryFailedCount MetricConfig `mapstructure:"apachedruid.query.failed.count"` - ApachedruidQueryInterruptedCount MetricConfig `mapstructure:"apachedruid.query.interrupted.count"` - ApachedruidQueryNodeBackpressure MetricConfig `mapstructure:"apachedruid.query.node.backpressure"` - ApachedruidQueryNodeBytes MetricConfig `mapstructure:"apachedruid.query.node.bytes"` - ApachedruidQueryNodeTime MetricConfig `mapstructure:"apachedruid.query.node.time"` - ApachedruidQueryNodeTtfb MetricConfig `mapstructure:"apachedruid.query.node.ttfb"` - ApachedruidQueryPriority MetricConfig `mapstructure:"apachedruid.query.priority"` - ApachedruidQueryRowLimitExceededCount MetricConfig `mapstructure:"apachedruid.query.row_limit.exceeded.count"` - ApachedruidQuerySegmentTime MetricConfig `mapstructure:"apachedruid.query.segment.time"` - ApachedruidQuerySegmentAndCacheTime MetricConfig `mapstructure:"apachedruid.query.segment_and_cache.time"` - ApachedruidQuerySegmentsCount MetricConfig `mapstructure:"apachedruid.query.segments.count"` - ApachedruidQuerySuccessCount MetricConfig `mapstructure:"apachedruid.query.success.count"` - ApachedruidQueryTime MetricConfig `mapstructure:"apachedruid.query.time"` - ApachedruidQueryTimeoutCount MetricConfig `mapstructure:"apachedruid.query.timeout.count"` - ApachedruidQueryWaitTime MetricConfig `mapstructure:"apachedruid.query.wait.time"` - ApachedruidSegmentAddedBytes MetricConfig `mapstructure:"apachedruid.segment.added.bytes"` - ApachedruidSegmentAssignSkippedCount MetricConfig `mapstructure:"apachedruid.segment.assign_skipped.count"` - ApachedruidSegmentAssignedCount MetricConfig `mapstructure:"apachedruid.segment.assigned.count"` - ApachedruidSegmentCompactedBytes 
MetricConfig `mapstructure:"apachedruid.segment.compacted.bytes"` - ApachedruidSegmentCompactedCount MetricConfig `mapstructure:"apachedruid.segment.compacted.count"` - ApachedruidSegmentCount MetricConfig `mapstructure:"apachedruid.segment.count"` - ApachedruidSegmentDeletedCount MetricConfig `mapstructure:"apachedruid.segment.deleted.count"` - ApachedruidSegmentDropQueueCount MetricConfig `mapstructure:"apachedruid.segment.drop_queue.count"` - ApachedruidSegmentDropSkippedCount MetricConfig `mapstructure:"apachedruid.segment.drop_skipped.count"` - ApachedruidSegmentDroppedCount MetricConfig `mapstructure:"apachedruid.segment.dropped.count"` - ApachedruidSegmentLoadQueueAssigned MetricConfig `mapstructure:"apachedruid.segment.load_queue.assigned"` - ApachedruidSegmentLoadQueueCancelled MetricConfig `mapstructure:"apachedruid.segment.load_queue.cancelled"` - ApachedruidSegmentLoadQueueCount MetricConfig `mapstructure:"apachedruid.segment.load_queue.count"` - ApachedruidSegmentLoadQueueFailed MetricConfig `mapstructure:"apachedruid.segment.load_queue.failed"` - ApachedruidSegmentLoadQueueSize MetricConfig `mapstructure:"apachedruid.segment.load_queue.size"` - ApachedruidSegmentLoadQueueSuccess MetricConfig `mapstructure:"apachedruid.segment.load_queue.success"` - ApachedruidSegmentMax MetricConfig `mapstructure:"apachedruid.segment.max"` - ApachedruidSegmentMoveSkippedCount MetricConfig `mapstructure:"apachedruid.segment.move_skipped.count"` - ApachedruidSegmentMovedBytes MetricConfig `mapstructure:"apachedruid.segment.moved.bytes"` - ApachedruidSegmentMovedCount MetricConfig `mapstructure:"apachedruid.segment.moved.count"` - ApachedruidSegmentNukedBytes MetricConfig `mapstructure:"apachedruid.segment.nuked.bytes"` - ApachedruidSegmentOverShadowedCount MetricConfig `mapstructure:"apachedruid.segment.over_shadowed.count"` - ApachedruidSegmentPendingDelete MetricConfig `mapstructure:"apachedruid.segment.pending_delete"` - ApachedruidSegmentRowCountAvg MetricConfig `mapstructure:"apachedruid.segment.row_count.avg"` - ApachedruidSegmentRowCountRangeCount MetricConfig `mapstructure:"apachedruid.segment.row_count.range.count"` - ApachedruidSegmentScanActive MetricConfig `mapstructure:"apachedruid.segment.scan.active"` - ApachedruidSegmentScanPending MetricConfig `mapstructure:"apachedruid.segment.scan.pending"` - ApachedruidSegmentSize MetricConfig `mapstructure:"apachedruid.segment.size"` - ApachedruidSegmentSkipCompactBytes MetricConfig `mapstructure:"apachedruid.segment.skip_compact.bytes"` - ApachedruidSegmentSkipCompactCount MetricConfig `mapstructure:"apachedruid.segment.skip_compact.count"` - ApachedruidSegmentUnavailableCount MetricConfig `mapstructure:"apachedruid.segment.unavailable.count"` - ApachedruidSegmentUnderReplicatedCount MetricConfig `mapstructure:"apachedruid.segment.under_replicated.count"` - ApachedruidSegmentUnneededCount MetricConfig `mapstructure:"apachedruid.segment.unneeded.count"` - ApachedruidSegmentUsed MetricConfig `mapstructure:"apachedruid.segment.used"` - ApachedruidSegmentUsedPercent MetricConfig `mapstructure:"apachedruid.segment.used_percent"` - ApachedruidSegmentWaitCompactBytes MetricConfig `mapstructure:"apachedruid.segment.wait_compact.bytes"` - ApachedruidSegmentWaitCompactCount MetricConfig `mapstructure:"apachedruid.segment.wait_compact.count"` - ApachedruidServerviewInitTime MetricConfig `mapstructure:"apachedruid.serverview.init.time"` - ApachedruidServerviewSyncHealthy MetricConfig `mapstructure:"apachedruid.serverview.sync.healthy"` - 
ApachedruidServerviewSyncUnstableTime MetricConfig `mapstructure:"apachedruid.serverview.sync.unstable_time"` - ApachedruidSQLQueryBytes MetricConfig `mapstructure:"apachedruid.sql_query.bytes"` - ApachedruidSQLQueryPlanningTimeMs MetricConfig `mapstructure:"apachedruid.sql_query.planning_time_ms"` - ApachedruidSQLQueryTime MetricConfig `mapstructure:"apachedruid.sql_query.time"` - ApachedruidSubqueryByteLimitCount MetricConfig `mapstructure:"apachedruid.subquery.byte_limit.count"` - ApachedruidSubqueryFallbackCount MetricConfig `mapstructure:"apachedruid.subquery.fallback.count"` - ApachedruidSubqueryFallbackInsufficientTypeCount MetricConfig `mapstructure:"apachedruid.subquery.fallback.insufficient_type.count"` - ApachedruidSubqueryFallbackUnknownReasonCount MetricConfig `mapstructure:"apachedruid.subquery.fallback.unknown_reason.count"` - ApachedruidSubqueryRowLimitCount MetricConfig `mapstructure:"apachedruid.subquery.row_limit.count"` - ApachedruidSysCPU MetricConfig `mapstructure:"apachedruid.sys.cpu"` - ApachedruidSysDiskQueue MetricConfig `mapstructure:"apachedruid.sys.disk.queue"` - ApachedruidSysDiskReadCount MetricConfig `mapstructure:"apachedruid.sys.disk.read.count"` - ApachedruidSysDiskReadSize MetricConfig `mapstructure:"apachedruid.sys.disk.read.size"` - ApachedruidSysDiskTransferTime MetricConfig `mapstructure:"apachedruid.sys.disk.transfer_time"` - ApachedruidSysDiskWriteCount MetricConfig `mapstructure:"apachedruid.sys.disk.write.count"` - ApachedruidSysDiskWriteSize MetricConfig `mapstructure:"apachedruid.sys.disk.write.size"` - ApachedruidSysFsFilesCount MetricConfig `mapstructure:"apachedruid.sys.fs.files.count"` - ApachedruidSysFsFilesFree MetricConfig `mapstructure:"apachedruid.sys.fs.files.free"` - ApachedruidSysFsMax MetricConfig `mapstructure:"apachedruid.sys.fs.max"` - ApachedruidSysFsUsed MetricConfig `mapstructure:"apachedruid.sys.fs.used"` - ApachedruidSysLa1 MetricConfig `mapstructure:"apachedruid.sys.la.1"` - ApachedruidSysLa15 MetricConfig `mapstructure:"apachedruid.sys.la.15"` - ApachedruidSysLa5 MetricConfig `mapstructure:"apachedruid.sys.la.5"` - ApachedruidSysMemFree MetricConfig `mapstructure:"apachedruid.sys.mem.free"` - ApachedruidSysMemMax MetricConfig `mapstructure:"apachedruid.sys.mem.max"` - ApachedruidSysMemUsed MetricConfig `mapstructure:"apachedruid.sys.mem.used"` - ApachedruidSysNetReadDropped MetricConfig `mapstructure:"apachedruid.sys.net.read.dropped"` - ApachedruidSysNetReadErrors MetricConfig `mapstructure:"apachedruid.sys.net.read.errors"` - ApachedruidSysNetReadPackets MetricConfig `mapstructure:"apachedruid.sys.net.read.packets"` - ApachedruidSysNetReadSize MetricConfig `mapstructure:"apachedruid.sys.net.read.size"` - ApachedruidSysNetWriteCollisions MetricConfig `mapstructure:"apachedruid.sys.net.write.collisions"` - ApachedruidSysNetWriteErrors MetricConfig `mapstructure:"apachedruid.sys.net.write.errors"` - ApachedruidSysNetWritePackets MetricConfig `mapstructure:"apachedruid.sys.net.write.packets"` - ApachedruidSysNetWriteSize MetricConfig `mapstructure:"apachedruid.sys.net.write.size"` - ApachedruidSysStorageUsed MetricConfig `mapstructure:"apachedruid.sys.storage.used"` - ApachedruidSysSwapFree MetricConfig `mapstructure:"apachedruid.sys.swap.free"` - ApachedruidSysSwapMax MetricConfig `mapstructure:"apachedruid.sys.swap.max"` - ApachedruidSysSwapPageIn MetricConfig `mapstructure:"apachedruid.sys.swap.page_in"` - ApachedruidSysSwapPageOut MetricConfig `mapstructure:"apachedruid.sys.swap.page_out"` - 
ApachedruidSysTcpv4ActiveOpens MetricConfig `mapstructure:"apachedruid.sys.tcpv4.active_opens"` - ApachedruidSysTcpv4AttemptFails MetricConfig `mapstructure:"apachedruid.sys.tcpv4.attempt_fails"` - ApachedruidSysTcpv4EstabResets MetricConfig `mapstructure:"apachedruid.sys.tcpv4.estab_resets"` - ApachedruidSysTcpv4InErrs MetricConfig `mapstructure:"apachedruid.sys.tcpv4.in.errs"` - ApachedruidSysTcpv4InSegs MetricConfig `mapstructure:"apachedruid.sys.tcpv4.in.segs"` - ApachedruidSysTcpv4OutRsts MetricConfig `mapstructure:"apachedruid.sys.tcpv4.out.rsts"` - ApachedruidSysTcpv4OutSegs MetricConfig `mapstructure:"apachedruid.sys.tcpv4.out.segs"` - ApachedruidSysTcpv4PassiveOpens MetricConfig `mapstructure:"apachedruid.sys.tcpv4.passive_opens"` - ApachedruidSysTcpv4RetransSegs MetricConfig `mapstructure:"apachedruid.sys.tcpv4.retrans.segs"` - ApachedruidSysUptime MetricConfig `mapstructure:"apachedruid.sys.uptime"` - ApachedruidTaskActionBatchAttempts MetricConfig `mapstructure:"apachedruid.task.action.batch.attempts"` - ApachedruidTaskActionBatchQueueTime MetricConfig `mapstructure:"apachedruid.task.action.batch.queue_time"` - ApachedruidTaskActionBatchRunTime MetricConfig `mapstructure:"apachedruid.task.action.batch.run_time"` - ApachedruidTaskActionBatchSize MetricConfig `mapstructure:"apachedruid.task.action.batch.size"` - ApachedruidTaskActionFailedCount MetricConfig `mapstructure:"apachedruid.task.action.failed.count"` - ApachedruidTaskActionLogTime MetricConfig `mapstructure:"apachedruid.task.action.log.time"` - ApachedruidTaskActionRunTime MetricConfig `mapstructure:"apachedruid.task.action.run.time"` - ApachedruidTaskActionSuccessCount MetricConfig `mapstructure:"apachedruid.task.action.success.count"` - ApachedruidTaskFailedCount MetricConfig `mapstructure:"apachedruid.task.failed.count"` - ApachedruidTaskPendingCount MetricConfig `mapstructure:"apachedruid.task.pending.count"` - ApachedruidTaskPendingTime MetricConfig `mapstructure:"apachedruid.task.pending.time"` - ApachedruidTaskRunTime MetricConfig `mapstructure:"apachedruid.task.run.time"` - ApachedruidTaskRunningCount MetricConfig `mapstructure:"apachedruid.task.running.count"` - ApachedruidTaskSegmentAvailabilityWaitTime MetricConfig `mapstructure:"apachedruid.task.segment_availability.wait.time"` - ApachedruidTaskSuccessCount MetricConfig `mapstructure:"apachedruid.task.success.count"` - ApachedruidTaskWaitingCount MetricConfig `mapstructure:"apachedruid.task.waiting.count"` - ApachedruidTaskSlotBlacklistedCount MetricConfig `mapstructure:"apachedruid.task_slot.blacklisted.count"` - ApachedruidTaskSlotIdleCount MetricConfig `mapstructure:"apachedruid.task_slot.idle.count"` - ApachedruidTaskSlotLazyCount MetricConfig `mapstructure:"apachedruid.task_slot.lazy.count"` - ApachedruidTaskSlotTotalCount MetricConfig `mapstructure:"apachedruid.task_slot.total.count"` - ApachedruidTaskSlotUsedCount MetricConfig `mapstructure:"apachedruid.task_slot.used.count"` - ApachedruidTierHistoricalCount MetricConfig `mapstructure:"apachedruid.tier.historical.count"` - ApachedruidTierReplicationFactor MetricConfig `mapstructure:"apachedruid.tier.replication.factor"` - ApachedruidTierRequiredCapacity MetricConfig `mapstructure:"apachedruid.tier.required.capacity"` - ApachedruidTierTotalCapacity MetricConfig `mapstructure:"apachedruid.tier.total.capacity"` - ApachedruidWorkerTaskFailedCount MetricConfig `mapstructure:"apachedruid.worker.task.failed.count"` - ApachedruidWorkerTaskSuccessCount MetricConfig 
`mapstructure:"apachedruid.worker.task.success.count"` - ApachedruidWorkerTaskSlotIdleCount MetricConfig `mapstructure:"apachedruid.worker.task_slot.idle.count"` - ApachedruidWorkerTaskSlotTotalCount MetricConfig `mapstructure:"apachedruid.worker.task_slot.total.count"` - ApachedruidWorkerTaskSlotUsedCount MetricConfig `mapstructure:"apachedruid.worker.task_slot.used.count"` - ApachedruidZkConnected MetricConfig `mapstructure:"apachedruid.zk.connected"` - ApachedruidZkReconnectTime MetricConfig `mapstructure:"apachedruid.zk.reconnect.time"` -} - -func DefaultMetricsConfig() MetricsConfig { - return MetricsConfig{ - ApachedruidCompactSegmentAnalyzerFetchAndProcessMillis: MetricConfig{ - Enabled: true, - }, - ApachedruidCompactTaskCount: MetricConfig{ - Enabled: true, - }, - ApachedruidCompactTaskAvailableSlotCount: MetricConfig{ - Enabled: true, - }, - ApachedruidCompactTaskMaxSlotCount: MetricConfig{ - Enabled: true, - }, - ApachedruidCoordinatorGlobalTime: MetricConfig{ - Enabled: true, - }, - ApachedruidCoordinatorTime: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestBytesReceived: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestCount: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestEventsBuffered: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestEventsDuplicate: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestEventsMessageGap: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestEventsProcessed: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestEventsProcessedWithError: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestEventsThrownAway: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestEventsUnparseable: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestHandoffCount: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestHandoffFailed: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestHandoffTime: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestInputBytes: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestKafkaAvgLag: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestKafkaLag: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestKafkaMaxLag: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestKafkaPartitionLag: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestKinesisAvgLagTime: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestKinesisLagTime: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestKinesisMaxLagTime: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestKinesisPartitionLagTime: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestMergeCPU: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestMergeTime: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestNoticesQueueSize: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestNoticesTime: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestPauseTime: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestPersistsBackPressure: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestPersistsCount: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestPersistsCPU: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestPersistsFailed: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestPersistsTime: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestRowsOutput: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestSegmentsCount: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestShuffleBytes: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestShuffleRequests: MetricConfig{ - Enabled: true, - }, - 
ApachedruidIngestSinkCount: MetricConfig{ - Enabled: true, - }, - ApachedruidIngestTombstonesCount: MetricConfig{ - Enabled: true, - }, - ApachedruidIntervalCompactedCount: MetricConfig{ - Enabled: true, - }, - ApachedruidIntervalSkipCompactCount: MetricConfig{ - Enabled: true, - }, - ApachedruidIntervalWaitCompactCount: MetricConfig{ - Enabled: true, - }, - ApachedruidJettyNumOpenConnections: MetricConfig{ - Enabled: true, - }, - ApachedruidJettyThreadPoolBusy: MetricConfig{ - Enabled: true, - }, - ApachedruidJettyThreadPoolIdle: MetricConfig{ - Enabled: true, - }, - ApachedruidJettyThreadPoolIsLowOnThreads: MetricConfig{ - Enabled: true, - }, - ApachedruidJettyThreadPoolMax: MetricConfig{ - Enabled: true, - }, - ApachedruidJettyThreadPoolMin: MetricConfig{ - Enabled: true, - }, - ApachedruidJettyThreadPoolQueueSize: MetricConfig{ - Enabled: true, - }, - ApachedruidJettyThreadPoolTotal: MetricConfig{ - Enabled: true, - }, - ApachedruidJvmBufferpoolCapacity: MetricConfig{ - Enabled: true, - }, - ApachedruidJvmBufferpoolCount: MetricConfig{ - Enabled: true, - }, - ApachedruidJvmBufferpoolUsed: MetricConfig{ - Enabled: true, - }, - ApachedruidJvmGcCount: MetricConfig{ - Enabled: true, - }, - ApachedruidJvmGcCPU: MetricConfig{ - Enabled: true, - }, - ApachedruidJvmMemCommitted: MetricConfig{ - Enabled: true, - }, - ApachedruidJvmMemInit: MetricConfig{ - Enabled: true, - }, - ApachedruidJvmMemMax: MetricConfig{ - Enabled: true, - }, - ApachedruidJvmMemUsed: MetricConfig{ - Enabled: true, - }, - ApachedruidJvmPoolCommitted: MetricConfig{ - Enabled: true, - }, - ApachedruidJvmPoolInit: MetricConfig{ - Enabled: true, - }, - ApachedruidJvmPoolMax: MetricConfig{ - Enabled: true, - }, - ApachedruidJvmPoolUsed: MetricConfig{ - Enabled: true, - }, - ApachedruidKillPendingSegmentsCount: MetricConfig{ - Enabled: true, - }, - ApachedruidKillTaskCount: MetricConfig{ - Enabled: true, - }, - ApachedruidKillTaskAvailableSlotCount: MetricConfig{ - Enabled: true, - }, - ApachedruidKillTaskMaxSlotCount: MetricConfig{ - Enabled: true, - }, - ApachedruidMergeBufferPendingRequests: MetricConfig{ - Enabled: true, - }, - ApachedruidMetadataKillAuditCount: MetricConfig{ - Enabled: true, - }, - ApachedruidMetadataKillCompactionCount: MetricConfig{ - Enabled: true, - }, - ApachedruidMetadataKillDatasourceCount: MetricConfig{ - Enabled: true, - }, - ApachedruidMetadataKillRuleCount: MetricConfig{ - Enabled: true, - }, - ApachedruidMetadataKillSupervisorCount: MetricConfig{ - Enabled: true, - }, - ApachedruidMetadatacacheInitTime: MetricConfig{ - Enabled: true, - }, - ApachedruidMetadatacacheRefreshCount: MetricConfig{ - Enabled: true, - }, - ApachedruidMetadatacacheRefreshTime: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryByteLimitExceededCount: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryBytes: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryCacheDeltaAverageBytes: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryCacheDeltaErrors: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryCacheDeltaEvictions: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryCacheDeltaHitRate: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryCacheDeltaHits: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryCacheDeltaMisses: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryCacheDeltaNumEntries: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryCacheDeltaPutError: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryCacheDeltaPutOk: MetricConfig{ - Enabled: true, - }, - 
ApachedruidQueryCacheDeltaPutOversized: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryCacheDeltaSizeBytes: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryCacheDeltaTimeouts: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryCacheMemcachedDelta: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryCacheMemcachedTotal: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryCacheTotalAverageBytes: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryCacheTotalErrors: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryCacheTotalEvictions: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryCacheTotalHitRate: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryCacheTotalHits: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryCacheTotalMisses: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryCacheTotalNumEntries: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryCacheTotalPutError: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryCacheTotalPutOk: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryCacheTotalPutOversized: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryCacheTotalSizeBytes: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryCacheTotalTimeouts: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryCount: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryCPUTime: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryFailedCount: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryInterruptedCount: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryNodeBackpressure: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryNodeBytes: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryNodeTime: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryNodeTtfb: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryPriority: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryRowLimitExceededCount: MetricConfig{ - Enabled: true, - }, - ApachedruidQuerySegmentTime: MetricConfig{ - Enabled: true, - }, - ApachedruidQuerySegmentAndCacheTime: MetricConfig{ - Enabled: true, - }, - ApachedruidQuerySegmentsCount: MetricConfig{ - Enabled: true, - }, - ApachedruidQuerySuccessCount: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryTime: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryTimeoutCount: MetricConfig{ - Enabled: true, - }, - ApachedruidQueryWaitTime: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentAddedBytes: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentAssignSkippedCount: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentAssignedCount: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentCompactedBytes: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentCompactedCount: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentCount: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentDeletedCount: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentDropQueueCount: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentDropSkippedCount: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentDroppedCount: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentLoadQueueAssigned: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentLoadQueueCancelled: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentLoadQueueCount: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentLoadQueueFailed: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentLoadQueueSize: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentLoadQueueSuccess: MetricConfig{ - Enabled: true, - }, - 
ApachedruidSegmentMax: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentMoveSkippedCount: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentMovedBytes: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentMovedCount: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentNukedBytes: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentOverShadowedCount: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentPendingDelete: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentRowCountAvg: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentRowCountRangeCount: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentScanActive: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentScanPending: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentSize: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentSkipCompactBytes: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentSkipCompactCount: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentUnavailableCount: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentUnderReplicatedCount: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentUnneededCount: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentUsed: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentUsedPercent: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentWaitCompactBytes: MetricConfig{ - Enabled: true, - }, - ApachedruidSegmentWaitCompactCount: MetricConfig{ - Enabled: true, - }, - ApachedruidServerviewInitTime: MetricConfig{ - Enabled: true, - }, - ApachedruidServerviewSyncHealthy: MetricConfig{ - Enabled: true, - }, - ApachedruidServerviewSyncUnstableTime: MetricConfig{ - Enabled: true, - }, - ApachedruidSQLQueryBytes: MetricConfig{ - Enabled: true, - }, - ApachedruidSQLQueryPlanningTimeMs: MetricConfig{ - Enabled: true, - }, - ApachedruidSQLQueryTime: MetricConfig{ - Enabled: true, - }, - ApachedruidSubqueryByteLimitCount: MetricConfig{ - Enabled: true, - }, - ApachedruidSubqueryFallbackCount: MetricConfig{ - Enabled: true, - }, - ApachedruidSubqueryFallbackInsufficientTypeCount: MetricConfig{ - Enabled: true, - }, - ApachedruidSubqueryFallbackUnknownReasonCount: MetricConfig{ - Enabled: true, - }, - ApachedruidSubqueryRowLimitCount: MetricConfig{ - Enabled: true, - }, - ApachedruidSysCPU: MetricConfig{ - Enabled: true, - }, - ApachedruidSysDiskQueue: MetricConfig{ - Enabled: true, - }, - ApachedruidSysDiskReadCount: MetricConfig{ - Enabled: true, - }, - ApachedruidSysDiskReadSize: MetricConfig{ - Enabled: true, - }, - ApachedruidSysDiskTransferTime: MetricConfig{ - Enabled: true, - }, - ApachedruidSysDiskWriteCount: MetricConfig{ - Enabled: true, - }, - ApachedruidSysDiskWriteSize: MetricConfig{ - Enabled: true, - }, - ApachedruidSysFsFilesCount: MetricConfig{ - Enabled: true, - }, - ApachedruidSysFsFilesFree: MetricConfig{ - Enabled: true, - }, - ApachedruidSysFsMax: MetricConfig{ - Enabled: true, - }, - ApachedruidSysFsUsed: MetricConfig{ - Enabled: true, - }, - ApachedruidSysLa1: MetricConfig{ - Enabled: true, - }, - ApachedruidSysLa15: MetricConfig{ - Enabled: true, - }, - ApachedruidSysLa5: MetricConfig{ - Enabled: true, - }, - ApachedruidSysMemFree: MetricConfig{ - Enabled: true, - }, - ApachedruidSysMemMax: MetricConfig{ - Enabled: true, - }, - ApachedruidSysMemUsed: MetricConfig{ - Enabled: true, - }, - ApachedruidSysNetReadDropped: MetricConfig{ - Enabled: true, - }, - ApachedruidSysNetReadErrors: MetricConfig{ - Enabled: true, - }, - ApachedruidSysNetReadPackets: MetricConfig{ - Enabled: true, - }, - 
ApachedruidSysNetReadSize: MetricConfig{ - Enabled: true, - }, - ApachedruidSysNetWriteCollisions: MetricConfig{ - Enabled: true, - }, - ApachedruidSysNetWriteErrors: MetricConfig{ - Enabled: true, - }, - ApachedruidSysNetWritePackets: MetricConfig{ - Enabled: true, - }, - ApachedruidSysNetWriteSize: MetricConfig{ - Enabled: true, - }, - ApachedruidSysStorageUsed: MetricConfig{ - Enabled: true, - }, - ApachedruidSysSwapFree: MetricConfig{ - Enabled: true, - }, - ApachedruidSysSwapMax: MetricConfig{ - Enabled: true, - }, - ApachedruidSysSwapPageIn: MetricConfig{ - Enabled: true, - }, - ApachedruidSysSwapPageOut: MetricConfig{ - Enabled: true, - }, - ApachedruidSysTcpv4ActiveOpens: MetricConfig{ - Enabled: true, - }, - ApachedruidSysTcpv4AttemptFails: MetricConfig{ - Enabled: true, - }, - ApachedruidSysTcpv4EstabResets: MetricConfig{ - Enabled: true, - }, - ApachedruidSysTcpv4InErrs: MetricConfig{ - Enabled: true, - }, - ApachedruidSysTcpv4InSegs: MetricConfig{ - Enabled: true, - }, - ApachedruidSysTcpv4OutRsts: MetricConfig{ - Enabled: true, - }, - ApachedruidSysTcpv4OutSegs: MetricConfig{ - Enabled: true, - }, - ApachedruidSysTcpv4PassiveOpens: MetricConfig{ - Enabled: true, - }, - ApachedruidSysTcpv4RetransSegs: MetricConfig{ - Enabled: true, - }, - ApachedruidSysUptime: MetricConfig{ - Enabled: true, - }, - ApachedruidTaskActionBatchAttempts: MetricConfig{ - Enabled: true, - }, - ApachedruidTaskActionBatchQueueTime: MetricConfig{ - Enabled: true, - }, - ApachedruidTaskActionBatchRunTime: MetricConfig{ - Enabled: true, - }, - ApachedruidTaskActionBatchSize: MetricConfig{ - Enabled: true, - }, - ApachedruidTaskActionFailedCount: MetricConfig{ - Enabled: true, - }, - ApachedruidTaskActionLogTime: MetricConfig{ - Enabled: true, - }, - ApachedruidTaskActionRunTime: MetricConfig{ - Enabled: true, - }, - ApachedruidTaskActionSuccessCount: MetricConfig{ - Enabled: true, - }, - ApachedruidTaskFailedCount: MetricConfig{ - Enabled: true, - }, - ApachedruidTaskPendingCount: MetricConfig{ - Enabled: true, - }, - ApachedruidTaskPendingTime: MetricConfig{ - Enabled: true, - }, - ApachedruidTaskRunTime: MetricConfig{ - Enabled: true, - }, - ApachedruidTaskRunningCount: MetricConfig{ - Enabled: true, - }, - ApachedruidTaskSegmentAvailabilityWaitTime: MetricConfig{ - Enabled: true, - }, - ApachedruidTaskSuccessCount: MetricConfig{ - Enabled: true, - }, - ApachedruidTaskWaitingCount: MetricConfig{ - Enabled: true, - }, - ApachedruidTaskSlotBlacklistedCount: MetricConfig{ - Enabled: true, - }, - ApachedruidTaskSlotIdleCount: MetricConfig{ - Enabled: true, - }, - ApachedruidTaskSlotLazyCount: MetricConfig{ - Enabled: true, - }, - ApachedruidTaskSlotTotalCount: MetricConfig{ - Enabled: true, - }, - ApachedruidTaskSlotUsedCount: MetricConfig{ - Enabled: true, - }, - ApachedruidTierHistoricalCount: MetricConfig{ - Enabled: true, - }, - ApachedruidTierReplicationFactor: MetricConfig{ - Enabled: true, - }, - ApachedruidTierRequiredCapacity: MetricConfig{ - Enabled: true, - }, - ApachedruidTierTotalCapacity: MetricConfig{ - Enabled: true, - }, - ApachedruidWorkerTaskFailedCount: MetricConfig{ - Enabled: true, - }, - ApachedruidWorkerTaskSuccessCount: MetricConfig{ - Enabled: true, - }, - ApachedruidWorkerTaskSlotIdleCount: MetricConfig{ - Enabled: true, - }, - ApachedruidWorkerTaskSlotTotalCount: MetricConfig{ - Enabled: true, - }, - ApachedruidWorkerTaskSlotUsedCount: MetricConfig{ - Enabled: true, - }, - ApachedruidZkConnected: MetricConfig{ - Enabled: true, - }, - ApachedruidZkReconnectTime: MetricConfig{ - 
	Enabled: true,
-		},
-	}
-}
-
-// ResourceAttributeConfig provides common config for a particular resource attribute.
-type ResourceAttributeConfig struct {
-	Enabled bool `mapstructure:"enabled"`
-
-	enabledSetByUser bool
-}
-
-func (rac *ResourceAttributeConfig) Unmarshal(parser *confmap.Conf) error {
-	if parser == nil {
-		return nil
-	}
-	err := parser.Unmarshal(rac)
-	if err != nil {
-		return err
-	}
-	rac.enabledSetByUser = parser.IsSet("enabled")
-	return nil
-}
-
-// ResourceAttributesConfig provides config for apachedruid resource attributes.
-type ResourceAttributesConfig struct {
-	ApachedruidClusterName ResourceAttributeConfig `mapstructure:"apachedruid.cluster.name"`
-	ApachedruidNodeHost    ResourceAttributeConfig `mapstructure:"apachedruid.node.host"`
-	ApachedruidNodeService ResourceAttributeConfig `mapstructure:"apachedruid.node.service"`
-}
-
-func DefaultResourceAttributesConfig() ResourceAttributesConfig {
-	return ResourceAttributesConfig{
-		ApachedruidClusterName: ResourceAttributeConfig{
-			Enabled: true,
-		},
-		ApachedruidNodeHost: ResourceAttributeConfig{
-			Enabled: true,
-		},
-		ApachedruidNodeService: ResourceAttributeConfig{
-			Enabled: true,
-		},
-	}
-}
-
-// MetricsBuilderConfig is a configuration for apachedruid metrics builder.
-type MetricsBuilderConfig struct {
-	Metrics            MetricsConfig            `mapstructure:"metrics"`
-	ResourceAttributes ResourceAttributesConfig `mapstructure:"resource_attributes"`
-}
-
-func DefaultMetricsBuilderConfig() MetricsBuilderConfig {
-	return MetricsBuilderConfig{
-		Metrics:            DefaultMetricsConfig(),
-		ResourceAttributes: DefaultResourceAttributesConfig(),
-	}
-}
diff --git a/receiver/apachedruidreceiver/internal/metadata/generated_config_test.go b/receiver/apachedruidreceiver/internal/metadata/generated_config_test.go
deleted file mode 100644
index 40312148ee7c1..0000000000000
--- a/receiver/apachedruidreceiver/internal/metadata/generated_config_test.go
+++ /dev/null
@@ -1,606 +0,0 @@
-// Code generated by mdatagen. DO NOT EDIT.
- -package metadata - -import ( - "path/filepath" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/confmap/confmaptest" -) - -func TestMetricsBuilderConfig(t *testing.T) { - tests := []struct { - name string - want MetricsBuilderConfig - }{ - { - name: "default", - want: DefaultMetricsBuilderConfig(), - }, - { - name: "all_set", - want: MetricsBuilderConfig{ - Metrics: MetricsConfig{ - ApachedruidCompactSegmentAnalyzerFetchAndProcessMillis: MetricConfig{Enabled: true}, - ApachedruidCompactTaskCount: MetricConfig{Enabled: true}, - ApachedruidCompactTaskAvailableSlotCount: MetricConfig{Enabled: true}, - ApachedruidCompactTaskMaxSlotCount: MetricConfig{Enabled: true}, - ApachedruidCoordinatorGlobalTime: MetricConfig{Enabled: true}, - ApachedruidCoordinatorTime: MetricConfig{Enabled: true}, - ApachedruidIngestBytesReceived: MetricConfig{Enabled: true}, - ApachedruidIngestCount: MetricConfig{Enabled: true}, - ApachedruidIngestEventsBuffered: MetricConfig{Enabled: true}, - ApachedruidIngestEventsDuplicate: MetricConfig{Enabled: true}, - ApachedruidIngestEventsMessageGap: MetricConfig{Enabled: true}, - ApachedruidIngestEventsProcessed: MetricConfig{Enabled: true}, - ApachedruidIngestEventsProcessedWithError: MetricConfig{Enabled: true}, - ApachedruidIngestEventsThrownAway: MetricConfig{Enabled: true}, - ApachedruidIngestEventsUnparseable: MetricConfig{Enabled: true}, - ApachedruidIngestHandoffCount: MetricConfig{Enabled: true}, - ApachedruidIngestHandoffFailed: MetricConfig{Enabled: true}, - ApachedruidIngestHandoffTime: MetricConfig{Enabled: true}, - ApachedruidIngestInputBytes: MetricConfig{Enabled: true}, - ApachedruidIngestKafkaAvgLag: MetricConfig{Enabled: true}, - ApachedruidIngestKafkaLag: MetricConfig{Enabled: true}, - ApachedruidIngestKafkaMaxLag: MetricConfig{Enabled: true}, - ApachedruidIngestKafkaPartitionLag: MetricConfig{Enabled: true}, - ApachedruidIngestKinesisAvgLagTime: MetricConfig{Enabled: true}, - ApachedruidIngestKinesisLagTime: MetricConfig{Enabled: true}, - ApachedruidIngestKinesisMaxLagTime: MetricConfig{Enabled: true}, - ApachedruidIngestKinesisPartitionLagTime: MetricConfig{Enabled: true}, - ApachedruidIngestMergeCPU: MetricConfig{Enabled: true}, - ApachedruidIngestMergeTime: MetricConfig{Enabled: true}, - ApachedruidIngestNoticesQueueSize: MetricConfig{Enabled: true}, - ApachedruidIngestNoticesTime: MetricConfig{Enabled: true}, - ApachedruidIngestPauseTime: MetricConfig{Enabled: true}, - ApachedruidIngestPersistsBackPressure: MetricConfig{Enabled: true}, - ApachedruidIngestPersistsCount: MetricConfig{Enabled: true}, - ApachedruidIngestPersistsCPU: MetricConfig{Enabled: true}, - ApachedruidIngestPersistsFailed: MetricConfig{Enabled: true}, - ApachedruidIngestPersistsTime: MetricConfig{Enabled: true}, - ApachedruidIngestRowsOutput: MetricConfig{Enabled: true}, - ApachedruidIngestSegmentsCount: MetricConfig{Enabled: true}, - ApachedruidIngestShuffleBytes: MetricConfig{Enabled: true}, - ApachedruidIngestShuffleRequests: MetricConfig{Enabled: true}, - ApachedruidIngestSinkCount: MetricConfig{Enabled: true}, - ApachedruidIngestTombstonesCount: MetricConfig{Enabled: true}, - ApachedruidIntervalCompactedCount: MetricConfig{Enabled: true}, - ApachedruidIntervalSkipCompactCount: MetricConfig{Enabled: true}, - ApachedruidIntervalWaitCompactCount: MetricConfig{Enabled: true}, - ApachedruidJettyNumOpenConnections: 
MetricConfig{Enabled: true}, - ApachedruidJettyThreadPoolBusy: MetricConfig{Enabled: true}, - ApachedruidJettyThreadPoolIdle: MetricConfig{Enabled: true}, - ApachedruidJettyThreadPoolIsLowOnThreads: MetricConfig{Enabled: true}, - ApachedruidJettyThreadPoolMax: MetricConfig{Enabled: true}, - ApachedruidJettyThreadPoolMin: MetricConfig{Enabled: true}, - ApachedruidJettyThreadPoolQueueSize: MetricConfig{Enabled: true}, - ApachedruidJettyThreadPoolTotal: MetricConfig{Enabled: true}, - ApachedruidJvmBufferpoolCapacity: MetricConfig{Enabled: true}, - ApachedruidJvmBufferpoolCount: MetricConfig{Enabled: true}, - ApachedruidJvmBufferpoolUsed: MetricConfig{Enabled: true}, - ApachedruidJvmGcCount: MetricConfig{Enabled: true}, - ApachedruidJvmGcCPU: MetricConfig{Enabled: true}, - ApachedruidJvmMemCommitted: MetricConfig{Enabled: true}, - ApachedruidJvmMemInit: MetricConfig{Enabled: true}, - ApachedruidJvmMemMax: MetricConfig{Enabled: true}, - ApachedruidJvmMemUsed: MetricConfig{Enabled: true}, - ApachedruidJvmPoolCommitted: MetricConfig{Enabled: true}, - ApachedruidJvmPoolInit: MetricConfig{Enabled: true}, - ApachedruidJvmPoolMax: MetricConfig{Enabled: true}, - ApachedruidJvmPoolUsed: MetricConfig{Enabled: true}, - ApachedruidKillPendingSegmentsCount: MetricConfig{Enabled: true}, - ApachedruidKillTaskCount: MetricConfig{Enabled: true}, - ApachedruidKillTaskAvailableSlotCount: MetricConfig{Enabled: true}, - ApachedruidKillTaskMaxSlotCount: MetricConfig{Enabled: true}, - ApachedruidMergeBufferPendingRequests: MetricConfig{Enabled: true}, - ApachedruidMetadataKillAuditCount: MetricConfig{Enabled: true}, - ApachedruidMetadataKillCompactionCount: MetricConfig{Enabled: true}, - ApachedruidMetadataKillDatasourceCount: MetricConfig{Enabled: true}, - ApachedruidMetadataKillRuleCount: MetricConfig{Enabled: true}, - ApachedruidMetadataKillSupervisorCount: MetricConfig{Enabled: true}, - ApachedruidMetadatacacheInitTime: MetricConfig{Enabled: true}, - ApachedruidMetadatacacheRefreshCount: MetricConfig{Enabled: true}, - ApachedruidMetadatacacheRefreshTime: MetricConfig{Enabled: true}, - ApachedruidQueryByteLimitExceededCount: MetricConfig{Enabled: true}, - ApachedruidQueryBytes: MetricConfig{Enabled: true}, - ApachedruidQueryCacheDeltaAverageBytes: MetricConfig{Enabled: true}, - ApachedruidQueryCacheDeltaErrors: MetricConfig{Enabled: true}, - ApachedruidQueryCacheDeltaEvictions: MetricConfig{Enabled: true}, - ApachedruidQueryCacheDeltaHitRate: MetricConfig{Enabled: true}, - ApachedruidQueryCacheDeltaHits: MetricConfig{Enabled: true}, - ApachedruidQueryCacheDeltaMisses: MetricConfig{Enabled: true}, - ApachedruidQueryCacheDeltaNumEntries: MetricConfig{Enabled: true}, - ApachedruidQueryCacheDeltaPutError: MetricConfig{Enabled: true}, - ApachedruidQueryCacheDeltaPutOk: MetricConfig{Enabled: true}, - ApachedruidQueryCacheDeltaPutOversized: MetricConfig{Enabled: true}, - ApachedruidQueryCacheDeltaSizeBytes: MetricConfig{Enabled: true}, - ApachedruidQueryCacheDeltaTimeouts: MetricConfig{Enabled: true}, - ApachedruidQueryCacheMemcachedDelta: MetricConfig{Enabled: true}, - ApachedruidQueryCacheMemcachedTotal: MetricConfig{Enabled: true}, - ApachedruidQueryCacheTotalAverageBytes: MetricConfig{Enabled: true}, - ApachedruidQueryCacheTotalErrors: MetricConfig{Enabled: true}, - ApachedruidQueryCacheTotalEvictions: MetricConfig{Enabled: true}, - ApachedruidQueryCacheTotalHitRate: MetricConfig{Enabled: true}, - ApachedruidQueryCacheTotalHits: MetricConfig{Enabled: true}, - ApachedruidQueryCacheTotalMisses: MetricConfig{Enabled: 
true}, - ApachedruidQueryCacheTotalNumEntries: MetricConfig{Enabled: true}, - ApachedruidQueryCacheTotalPutError: MetricConfig{Enabled: true}, - ApachedruidQueryCacheTotalPutOk: MetricConfig{Enabled: true}, - ApachedruidQueryCacheTotalPutOversized: MetricConfig{Enabled: true}, - ApachedruidQueryCacheTotalSizeBytes: MetricConfig{Enabled: true}, - ApachedruidQueryCacheTotalTimeouts: MetricConfig{Enabled: true}, - ApachedruidQueryCount: MetricConfig{Enabled: true}, - ApachedruidQueryCPUTime: MetricConfig{Enabled: true}, - ApachedruidQueryFailedCount: MetricConfig{Enabled: true}, - ApachedruidQueryInterruptedCount: MetricConfig{Enabled: true}, - ApachedruidQueryNodeBackpressure: MetricConfig{Enabled: true}, - ApachedruidQueryNodeBytes: MetricConfig{Enabled: true}, - ApachedruidQueryNodeTime: MetricConfig{Enabled: true}, - ApachedruidQueryNodeTtfb: MetricConfig{Enabled: true}, - ApachedruidQueryPriority: MetricConfig{Enabled: true}, - ApachedruidQueryRowLimitExceededCount: MetricConfig{Enabled: true}, - ApachedruidQuerySegmentTime: MetricConfig{Enabled: true}, - ApachedruidQuerySegmentAndCacheTime: MetricConfig{Enabled: true}, - ApachedruidQuerySegmentsCount: MetricConfig{Enabled: true}, - ApachedruidQuerySuccessCount: MetricConfig{Enabled: true}, - ApachedruidQueryTime: MetricConfig{Enabled: true}, - ApachedruidQueryTimeoutCount: MetricConfig{Enabled: true}, - ApachedruidQueryWaitTime: MetricConfig{Enabled: true}, - ApachedruidSegmentAddedBytes: MetricConfig{Enabled: true}, - ApachedruidSegmentAssignSkippedCount: MetricConfig{Enabled: true}, - ApachedruidSegmentAssignedCount: MetricConfig{Enabled: true}, - ApachedruidSegmentCompactedBytes: MetricConfig{Enabled: true}, - ApachedruidSegmentCompactedCount: MetricConfig{Enabled: true}, - ApachedruidSegmentCount: MetricConfig{Enabled: true}, - ApachedruidSegmentDeletedCount: MetricConfig{Enabled: true}, - ApachedruidSegmentDropQueueCount: MetricConfig{Enabled: true}, - ApachedruidSegmentDropSkippedCount: MetricConfig{Enabled: true}, - ApachedruidSegmentDroppedCount: MetricConfig{Enabled: true}, - ApachedruidSegmentLoadQueueAssigned: MetricConfig{Enabled: true}, - ApachedruidSegmentLoadQueueCancelled: MetricConfig{Enabled: true}, - ApachedruidSegmentLoadQueueCount: MetricConfig{Enabled: true}, - ApachedruidSegmentLoadQueueFailed: MetricConfig{Enabled: true}, - ApachedruidSegmentLoadQueueSize: MetricConfig{Enabled: true}, - ApachedruidSegmentLoadQueueSuccess: MetricConfig{Enabled: true}, - ApachedruidSegmentMax: MetricConfig{Enabled: true}, - ApachedruidSegmentMoveSkippedCount: MetricConfig{Enabled: true}, - ApachedruidSegmentMovedBytes: MetricConfig{Enabled: true}, - ApachedruidSegmentMovedCount: MetricConfig{Enabled: true}, - ApachedruidSegmentNukedBytes: MetricConfig{Enabled: true}, - ApachedruidSegmentOverShadowedCount: MetricConfig{Enabled: true}, - ApachedruidSegmentPendingDelete: MetricConfig{Enabled: true}, - ApachedruidSegmentRowCountAvg: MetricConfig{Enabled: true}, - ApachedruidSegmentRowCountRangeCount: MetricConfig{Enabled: true}, - ApachedruidSegmentScanActive: MetricConfig{Enabled: true}, - ApachedruidSegmentScanPending: MetricConfig{Enabled: true}, - ApachedruidSegmentSize: MetricConfig{Enabled: true}, - ApachedruidSegmentSkipCompactBytes: MetricConfig{Enabled: true}, - ApachedruidSegmentSkipCompactCount: MetricConfig{Enabled: true}, - ApachedruidSegmentUnavailableCount: MetricConfig{Enabled: true}, - ApachedruidSegmentUnderReplicatedCount: MetricConfig{Enabled: true}, - ApachedruidSegmentUnneededCount: MetricConfig{Enabled: true}, - 
ApachedruidSegmentUsed: MetricConfig{Enabled: true}, - ApachedruidSegmentUsedPercent: MetricConfig{Enabled: true}, - ApachedruidSegmentWaitCompactBytes: MetricConfig{Enabled: true}, - ApachedruidSegmentWaitCompactCount: MetricConfig{Enabled: true}, - ApachedruidServerviewInitTime: MetricConfig{Enabled: true}, - ApachedruidServerviewSyncHealthy: MetricConfig{Enabled: true}, - ApachedruidServerviewSyncUnstableTime: MetricConfig{Enabled: true}, - ApachedruidSQLQueryBytes: MetricConfig{Enabled: true}, - ApachedruidSQLQueryPlanningTimeMs: MetricConfig{Enabled: true}, - ApachedruidSQLQueryTime: MetricConfig{Enabled: true}, - ApachedruidSubqueryByteLimitCount: MetricConfig{Enabled: true}, - ApachedruidSubqueryFallbackCount: MetricConfig{Enabled: true}, - ApachedruidSubqueryFallbackInsufficientTypeCount: MetricConfig{Enabled: true}, - ApachedruidSubqueryFallbackUnknownReasonCount: MetricConfig{Enabled: true}, - ApachedruidSubqueryRowLimitCount: MetricConfig{Enabled: true}, - ApachedruidSysCPU: MetricConfig{Enabled: true}, - ApachedruidSysDiskQueue: MetricConfig{Enabled: true}, - ApachedruidSysDiskReadCount: MetricConfig{Enabled: true}, - ApachedruidSysDiskReadSize: MetricConfig{Enabled: true}, - ApachedruidSysDiskTransferTime: MetricConfig{Enabled: true}, - ApachedruidSysDiskWriteCount: MetricConfig{Enabled: true}, - ApachedruidSysDiskWriteSize: MetricConfig{Enabled: true}, - ApachedruidSysFsFilesCount: MetricConfig{Enabled: true}, - ApachedruidSysFsFilesFree: MetricConfig{Enabled: true}, - ApachedruidSysFsMax: MetricConfig{Enabled: true}, - ApachedruidSysFsUsed: MetricConfig{Enabled: true}, - ApachedruidSysLa1: MetricConfig{Enabled: true}, - ApachedruidSysLa15: MetricConfig{Enabled: true}, - ApachedruidSysLa5: MetricConfig{Enabled: true}, - ApachedruidSysMemFree: MetricConfig{Enabled: true}, - ApachedruidSysMemMax: MetricConfig{Enabled: true}, - ApachedruidSysMemUsed: MetricConfig{Enabled: true}, - ApachedruidSysNetReadDropped: MetricConfig{Enabled: true}, - ApachedruidSysNetReadErrors: MetricConfig{Enabled: true}, - ApachedruidSysNetReadPackets: MetricConfig{Enabled: true}, - ApachedruidSysNetReadSize: MetricConfig{Enabled: true}, - ApachedruidSysNetWriteCollisions: MetricConfig{Enabled: true}, - ApachedruidSysNetWriteErrors: MetricConfig{Enabled: true}, - ApachedruidSysNetWritePackets: MetricConfig{Enabled: true}, - ApachedruidSysNetWriteSize: MetricConfig{Enabled: true}, - ApachedruidSysStorageUsed: MetricConfig{Enabled: true}, - ApachedruidSysSwapFree: MetricConfig{Enabled: true}, - ApachedruidSysSwapMax: MetricConfig{Enabled: true}, - ApachedruidSysSwapPageIn: MetricConfig{Enabled: true}, - ApachedruidSysSwapPageOut: MetricConfig{Enabled: true}, - ApachedruidSysTcpv4ActiveOpens: MetricConfig{Enabled: true}, - ApachedruidSysTcpv4AttemptFails: MetricConfig{Enabled: true}, - ApachedruidSysTcpv4EstabResets: MetricConfig{Enabled: true}, - ApachedruidSysTcpv4InErrs: MetricConfig{Enabled: true}, - ApachedruidSysTcpv4InSegs: MetricConfig{Enabled: true}, - ApachedruidSysTcpv4OutRsts: MetricConfig{Enabled: true}, - ApachedruidSysTcpv4OutSegs: MetricConfig{Enabled: true}, - ApachedruidSysTcpv4PassiveOpens: MetricConfig{Enabled: true}, - ApachedruidSysTcpv4RetransSegs: MetricConfig{Enabled: true}, - ApachedruidSysUptime: MetricConfig{Enabled: true}, - ApachedruidTaskActionBatchAttempts: MetricConfig{Enabled: true}, - ApachedruidTaskActionBatchQueueTime: MetricConfig{Enabled: true}, - ApachedruidTaskActionBatchRunTime: MetricConfig{Enabled: true}, - ApachedruidTaskActionBatchSize: MetricConfig{Enabled: 
true}, - ApachedruidTaskActionFailedCount: MetricConfig{Enabled: true}, - ApachedruidTaskActionLogTime: MetricConfig{Enabled: true}, - ApachedruidTaskActionRunTime: MetricConfig{Enabled: true}, - ApachedruidTaskActionSuccessCount: MetricConfig{Enabled: true}, - ApachedruidTaskFailedCount: MetricConfig{Enabled: true}, - ApachedruidTaskPendingCount: MetricConfig{Enabled: true}, - ApachedruidTaskPendingTime: MetricConfig{Enabled: true}, - ApachedruidTaskRunTime: MetricConfig{Enabled: true}, - ApachedruidTaskRunningCount: MetricConfig{Enabled: true}, - ApachedruidTaskSegmentAvailabilityWaitTime: MetricConfig{Enabled: true}, - ApachedruidTaskSuccessCount: MetricConfig{Enabled: true}, - ApachedruidTaskWaitingCount: MetricConfig{Enabled: true}, - ApachedruidTaskSlotBlacklistedCount: MetricConfig{Enabled: true}, - ApachedruidTaskSlotIdleCount: MetricConfig{Enabled: true}, - ApachedruidTaskSlotLazyCount: MetricConfig{Enabled: true}, - ApachedruidTaskSlotTotalCount: MetricConfig{Enabled: true}, - ApachedruidTaskSlotUsedCount: MetricConfig{Enabled: true}, - ApachedruidTierHistoricalCount: MetricConfig{Enabled: true}, - ApachedruidTierReplicationFactor: MetricConfig{Enabled: true}, - ApachedruidTierRequiredCapacity: MetricConfig{Enabled: true}, - ApachedruidTierTotalCapacity: MetricConfig{Enabled: true}, - ApachedruidWorkerTaskFailedCount: MetricConfig{Enabled: true}, - ApachedruidWorkerTaskSuccessCount: MetricConfig{Enabled: true}, - ApachedruidWorkerTaskSlotIdleCount: MetricConfig{Enabled: true}, - ApachedruidWorkerTaskSlotTotalCount: MetricConfig{Enabled: true}, - ApachedruidWorkerTaskSlotUsedCount: MetricConfig{Enabled: true}, - ApachedruidZkConnected: MetricConfig{Enabled: true}, - ApachedruidZkReconnectTime: MetricConfig{Enabled: true}, - }, - ResourceAttributes: ResourceAttributesConfig{ - ApachedruidClusterName: ResourceAttributeConfig{Enabled: true}, - ApachedruidNodeHost: ResourceAttributeConfig{Enabled: true}, - ApachedruidNodeService: ResourceAttributeConfig{Enabled: true}, - }, - }, - }, - { - name: "none_set", - want: MetricsBuilderConfig{ - Metrics: MetricsConfig{ - ApachedruidCompactSegmentAnalyzerFetchAndProcessMillis: MetricConfig{Enabled: false}, - ApachedruidCompactTaskCount: MetricConfig{Enabled: false}, - ApachedruidCompactTaskAvailableSlotCount: MetricConfig{Enabled: false}, - ApachedruidCompactTaskMaxSlotCount: MetricConfig{Enabled: false}, - ApachedruidCoordinatorGlobalTime: MetricConfig{Enabled: false}, - ApachedruidCoordinatorTime: MetricConfig{Enabled: false}, - ApachedruidIngestBytesReceived: MetricConfig{Enabled: false}, - ApachedruidIngestCount: MetricConfig{Enabled: false}, - ApachedruidIngestEventsBuffered: MetricConfig{Enabled: false}, - ApachedruidIngestEventsDuplicate: MetricConfig{Enabled: false}, - ApachedruidIngestEventsMessageGap: MetricConfig{Enabled: false}, - ApachedruidIngestEventsProcessed: MetricConfig{Enabled: false}, - ApachedruidIngestEventsProcessedWithError: MetricConfig{Enabled: false}, - ApachedruidIngestEventsThrownAway: MetricConfig{Enabled: false}, - ApachedruidIngestEventsUnparseable: MetricConfig{Enabled: false}, - ApachedruidIngestHandoffCount: MetricConfig{Enabled: false}, - ApachedruidIngestHandoffFailed: MetricConfig{Enabled: false}, - ApachedruidIngestHandoffTime: MetricConfig{Enabled: false}, - ApachedruidIngestInputBytes: MetricConfig{Enabled: false}, - ApachedruidIngestKafkaAvgLag: MetricConfig{Enabled: false}, - ApachedruidIngestKafkaLag: MetricConfig{Enabled: false}, - ApachedruidIngestKafkaMaxLag: MetricConfig{Enabled: false}, - 
ApachedruidIngestKafkaPartitionLag: MetricConfig{Enabled: false}, - ApachedruidIngestKinesisAvgLagTime: MetricConfig{Enabled: false}, - ApachedruidIngestKinesisLagTime: MetricConfig{Enabled: false}, - ApachedruidIngestKinesisMaxLagTime: MetricConfig{Enabled: false}, - ApachedruidIngestKinesisPartitionLagTime: MetricConfig{Enabled: false}, - ApachedruidIngestMergeCPU: MetricConfig{Enabled: false}, - ApachedruidIngestMergeTime: MetricConfig{Enabled: false}, - ApachedruidIngestNoticesQueueSize: MetricConfig{Enabled: false}, - ApachedruidIngestNoticesTime: MetricConfig{Enabled: false}, - ApachedruidIngestPauseTime: MetricConfig{Enabled: false}, - ApachedruidIngestPersistsBackPressure: MetricConfig{Enabled: false}, - ApachedruidIngestPersistsCount: MetricConfig{Enabled: false}, - ApachedruidIngestPersistsCPU: MetricConfig{Enabled: false}, - ApachedruidIngestPersistsFailed: MetricConfig{Enabled: false}, - ApachedruidIngestPersistsTime: MetricConfig{Enabled: false}, - ApachedruidIngestRowsOutput: MetricConfig{Enabled: false}, - ApachedruidIngestSegmentsCount: MetricConfig{Enabled: false}, - ApachedruidIngestShuffleBytes: MetricConfig{Enabled: false}, - ApachedruidIngestShuffleRequests: MetricConfig{Enabled: false}, - ApachedruidIngestSinkCount: MetricConfig{Enabled: false}, - ApachedruidIngestTombstonesCount: MetricConfig{Enabled: false}, - ApachedruidIntervalCompactedCount: MetricConfig{Enabled: false}, - ApachedruidIntervalSkipCompactCount: MetricConfig{Enabled: false}, - ApachedruidIntervalWaitCompactCount: MetricConfig{Enabled: false}, - ApachedruidJettyNumOpenConnections: MetricConfig{Enabled: false}, - ApachedruidJettyThreadPoolBusy: MetricConfig{Enabled: false}, - ApachedruidJettyThreadPoolIdle: MetricConfig{Enabled: false}, - ApachedruidJettyThreadPoolIsLowOnThreads: MetricConfig{Enabled: false}, - ApachedruidJettyThreadPoolMax: MetricConfig{Enabled: false}, - ApachedruidJettyThreadPoolMin: MetricConfig{Enabled: false}, - ApachedruidJettyThreadPoolQueueSize: MetricConfig{Enabled: false}, - ApachedruidJettyThreadPoolTotal: MetricConfig{Enabled: false}, - ApachedruidJvmBufferpoolCapacity: MetricConfig{Enabled: false}, - ApachedruidJvmBufferpoolCount: MetricConfig{Enabled: false}, - ApachedruidJvmBufferpoolUsed: MetricConfig{Enabled: false}, - ApachedruidJvmGcCount: MetricConfig{Enabled: false}, - ApachedruidJvmGcCPU: MetricConfig{Enabled: false}, - ApachedruidJvmMemCommitted: MetricConfig{Enabled: false}, - ApachedruidJvmMemInit: MetricConfig{Enabled: false}, - ApachedruidJvmMemMax: MetricConfig{Enabled: false}, - ApachedruidJvmMemUsed: MetricConfig{Enabled: false}, - ApachedruidJvmPoolCommitted: MetricConfig{Enabled: false}, - ApachedruidJvmPoolInit: MetricConfig{Enabled: false}, - ApachedruidJvmPoolMax: MetricConfig{Enabled: false}, - ApachedruidJvmPoolUsed: MetricConfig{Enabled: false}, - ApachedruidKillPendingSegmentsCount: MetricConfig{Enabled: false}, - ApachedruidKillTaskCount: MetricConfig{Enabled: false}, - ApachedruidKillTaskAvailableSlotCount: MetricConfig{Enabled: false}, - ApachedruidKillTaskMaxSlotCount: MetricConfig{Enabled: false}, - ApachedruidMergeBufferPendingRequests: MetricConfig{Enabled: false}, - ApachedruidMetadataKillAuditCount: MetricConfig{Enabled: false}, - ApachedruidMetadataKillCompactionCount: MetricConfig{Enabled: false}, - ApachedruidMetadataKillDatasourceCount: MetricConfig{Enabled: false}, - ApachedruidMetadataKillRuleCount: MetricConfig{Enabled: false}, - ApachedruidMetadataKillSupervisorCount: MetricConfig{Enabled: false}, - 
ApachedruidMetadatacacheInitTime: MetricConfig{Enabled: false}, - ApachedruidMetadatacacheRefreshCount: MetricConfig{Enabled: false}, - ApachedruidMetadatacacheRefreshTime: MetricConfig{Enabled: false}, - ApachedruidQueryByteLimitExceededCount: MetricConfig{Enabled: false}, - ApachedruidQueryBytes: MetricConfig{Enabled: false}, - ApachedruidQueryCacheDeltaAverageBytes: MetricConfig{Enabled: false}, - ApachedruidQueryCacheDeltaErrors: MetricConfig{Enabled: false}, - ApachedruidQueryCacheDeltaEvictions: MetricConfig{Enabled: false}, - ApachedruidQueryCacheDeltaHitRate: MetricConfig{Enabled: false}, - ApachedruidQueryCacheDeltaHits: MetricConfig{Enabled: false}, - ApachedruidQueryCacheDeltaMisses: MetricConfig{Enabled: false}, - ApachedruidQueryCacheDeltaNumEntries: MetricConfig{Enabled: false}, - ApachedruidQueryCacheDeltaPutError: MetricConfig{Enabled: false}, - ApachedruidQueryCacheDeltaPutOk: MetricConfig{Enabled: false}, - ApachedruidQueryCacheDeltaPutOversized: MetricConfig{Enabled: false}, - ApachedruidQueryCacheDeltaSizeBytes: MetricConfig{Enabled: false}, - ApachedruidQueryCacheDeltaTimeouts: MetricConfig{Enabled: false}, - ApachedruidQueryCacheMemcachedDelta: MetricConfig{Enabled: false}, - ApachedruidQueryCacheMemcachedTotal: MetricConfig{Enabled: false}, - ApachedruidQueryCacheTotalAverageBytes: MetricConfig{Enabled: false}, - ApachedruidQueryCacheTotalErrors: MetricConfig{Enabled: false}, - ApachedruidQueryCacheTotalEvictions: MetricConfig{Enabled: false}, - ApachedruidQueryCacheTotalHitRate: MetricConfig{Enabled: false}, - ApachedruidQueryCacheTotalHits: MetricConfig{Enabled: false}, - ApachedruidQueryCacheTotalMisses: MetricConfig{Enabled: false}, - ApachedruidQueryCacheTotalNumEntries: MetricConfig{Enabled: false}, - ApachedruidQueryCacheTotalPutError: MetricConfig{Enabled: false}, - ApachedruidQueryCacheTotalPutOk: MetricConfig{Enabled: false}, - ApachedruidQueryCacheTotalPutOversized: MetricConfig{Enabled: false}, - ApachedruidQueryCacheTotalSizeBytes: MetricConfig{Enabled: false}, - ApachedruidQueryCacheTotalTimeouts: MetricConfig{Enabled: false}, - ApachedruidQueryCount: MetricConfig{Enabled: false}, - ApachedruidQueryCPUTime: MetricConfig{Enabled: false}, - ApachedruidQueryFailedCount: MetricConfig{Enabled: false}, - ApachedruidQueryInterruptedCount: MetricConfig{Enabled: false}, - ApachedruidQueryNodeBackpressure: MetricConfig{Enabled: false}, - ApachedruidQueryNodeBytes: MetricConfig{Enabled: false}, - ApachedruidQueryNodeTime: MetricConfig{Enabled: false}, - ApachedruidQueryNodeTtfb: MetricConfig{Enabled: false}, - ApachedruidQueryPriority: MetricConfig{Enabled: false}, - ApachedruidQueryRowLimitExceededCount: MetricConfig{Enabled: false}, - ApachedruidQuerySegmentTime: MetricConfig{Enabled: false}, - ApachedruidQuerySegmentAndCacheTime: MetricConfig{Enabled: false}, - ApachedruidQuerySegmentsCount: MetricConfig{Enabled: false}, - ApachedruidQuerySuccessCount: MetricConfig{Enabled: false}, - ApachedruidQueryTime: MetricConfig{Enabled: false}, - ApachedruidQueryTimeoutCount: MetricConfig{Enabled: false}, - ApachedruidQueryWaitTime: MetricConfig{Enabled: false}, - ApachedruidSegmentAddedBytes: MetricConfig{Enabled: false}, - ApachedruidSegmentAssignSkippedCount: MetricConfig{Enabled: false}, - ApachedruidSegmentAssignedCount: MetricConfig{Enabled: false}, - ApachedruidSegmentCompactedBytes: MetricConfig{Enabled: false}, - ApachedruidSegmentCompactedCount: MetricConfig{Enabled: false}, - ApachedruidSegmentCount: MetricConfig{Enabled: false}, - 
ApachedruidSegmentDeletedCount: MetricConfig{Enabled: false}, - ApachedruidSegmentDropQueueCount: MetricConfig{Enabled: false}, - ApachedruidSegmentDropSkippedCount: MetricConfig{Enabled: false}, - ApachedruidSegmentDroppedCount: MetricConfig{Enabled: false}, - ApachedruidSegmentLoadQueueAssigned: MetricConfig{Enabled: false}, - ApachedruidSegmentLoadQueueCancelled: MetricConfig{Enabled: false}, - ApachedruidSegmentLoadQueueCount: MetricConfig{Enabled: false}, - ApachedruidSegmentLoadQueueFailed: MetricConfig{Enabled: false}, - ApachedruidSegmentLoadQueueSize: MetricConfig{Enabled: false}, - ApachedruidSegmentLoadQueueSuccess: MetricConfig{Enabled: false}, - ApachedruidSegmentMax: MetricConfig{Enabled: false}, - ApachedruidSegmentMoveSkippedCount: MetricConfig{Enabled: false}, - ApachedruidSegmentMovedBytes: MetricConfig{Enabled: false}, - ApachedruidSegmentMovedCount: MetricConfig{Enabled: false}, - ApachedruidSegmentNukedBytes: MetricConfig{Enabled: false}, - ApachedruidSegmentOverShadowedCount: MetricConfig{Enabled: false}, - ApachedruidSegmentPendingDelete: MetricConfig{Enabled: false}, - ApachedruidSegmentRowCountAvg: MetricConfig{Enabled: false}, - ApachedruidSegmentRowCountRangeCount: MetricConfig{Enabled: false}, - ApachedruidSegmentScanActive: MetricConfig{Enabled: false}, - ApachedruidSegmentScanPending: MetricConfig{Enabled: false}, - ApachedruidSegmentSize: MetricConfig{Enabled: false}, - ApachedruidSegmentSkipCompactBytes: MetricConfig{Enabled: false}, - ApachedruidSegmentSkipCompactCount: MetricConfig{Enabled: false}, - ApachedruidSegmentUnavailableCount: MetricConfig{Enabled: false}, - ApachedruidSegmentUnderReplicatedCount: MetricConfig{Enabled: false}, - ApachedruidSegmentUnneededCount: MetricConfig{Enabled: false}, - ApachedruidSegmentUsed: MetricConfig{Enabled: false}, - ApachedruidSegmentUsedPercent: MetricConfig{Enabled: false}, - ApachedruidSegmentWaitCompactBytes: MetricConfig{Enabled: false}, - ApachedruidSegmentWaitCompactCount: MetricConfig{Enabled: false}, - ApachedruidServerviewInitTime: MetricConfig{Enabled: false}, - ApachedruidServerviewSyncHealthy: MetricConfig{Enabled: false}, - ApachedruidServerviewSyncUnstableTime: MetricConfig{Enabled: false}, - ApachedruidSQLQueryBytes: MetricConfig{Enabled: false}, - ApachedruidSQLQueryPlanningTimeMs: MetricConfig{Enabled: false}, - ApachedruidSQLQueryTime: MetricConfig{Enabled: false}, - ApachedruidSubqueryByteLimitCount: MetricConfig{Enabled: false}, - ApachedruidSubqueryFallbackCount: MetricConfig{Enabled: false}, - ApachedruidSubqueryFallbackInsufficientTypeCount: MetricConfig{Enabled: false}, - ApachedruidSubqueryFallbackUnknownReasonCount: MetricConfig{Enabled: false}, - ApachedruidSubqueryRowLimitCount: MetricConfig{Enabled: false}, - ApachedruidSysCPU: MetricConfig{Enabled: false}, - ApachedruidSysDiskQueue: MetricConfig{Enabled: false}, - ApachedruidSysDiskReadCount: MetricConfig{Enabled: false}, - ApachedruidSysDiskReadSize: MetricConfig{Enabled: false}, - ApachedruidSysDiskTransferTime: MetricConfig{Enabled: false}, - ApachedruidSysDiskWriteCount: MetricConfig{Enabled: false}, - ApachedruidSysDiskWriteSize: MetricConfig{Enabled: false}, - ApachedruidSysFsFilesCount: MetricConfig{Enabled: false}, - ApachedruidSysFsFilesFree: MetricConfig{Enabled: false}, - ApachedruidSysFsMax: MetricConfig{Enabled: false}, - ApachedruidSysFsUsed: MetricConfig{Enabled: false}, - ApachedruidSysLa1: MetricConfig{Enabled: false}, - ApachedruidSysLa15: MetricConfig{Enabled: false}, - ApachedruidSysLa5: MetricConfig{Enabled: 
false}, - ApachedruidSysMemFree: MetricConfig{Enabled: false}, - ApachedruidSysMemMax: MetricConfig{Enabled: false}, - ApachedruidSysMemUsed: MetricConfig{Enabled: false}, - ApachedruidSysNetReadDropped: MetricConfig{Enabled: false}, - ApachedruidSysNetReadErrors: MetricConfig{Enabled: false}, - ApachedruidSysNetReadPackets: MetricConfig{Enabled: false}, - ApachedruidSysNetReadSize: MetricConfig{Enabled: false}, - ApachedruidSysNetWriteCollisions: MetricConfig{Enabled: false}, - ApachedruidSysNetWriteErrors: MetricConfig{Enabled: false}, - ApachedruidSysNetWritePackets: MetricConfig{Enabled: false}, - ApachedruidSysNetWriteSize: MetricConfig{Enabled: false}, - ApachedruidSysStorageUsed: MetricConfig{Enabled: false}, - ApachedruidSysSwapFree: MetricConfig{Enabled: false}, - ApachedruidSysSwapMax: MetricConfig{Enabled: false}, - ApachedruidSysSwapPageIn: MetricConfig{Enabled: false}, - ApachedruidSysSwapPageOut: MetricConfig{Enabled: false}, - ApachedruidSysTcpv4ActiveOpens: MetricConfig{Enabled: false}, - ApachedruidSysTcpv4AttemptFails: MetricConfig{Enabled: false}, - ApachedruidSysTcpv4EstabResets: MetricConfig{Enabled: false}, - ApachedruidSysTcpv4InErrs: MetricConfig{Enabled: false}, - ApachedruidSysTcpv4InSegs: MetricConfig{Enabled: false}, - ApachedruidSysTcpv4OutRsts: MetricConfig{Enabled: false}, - ApachedruidSysTcpv4OutSegs: MetricConfig{Enabled: false}, - ApachedruidSysTcpv4PassiveOpens: MetricConfig{Enabled: false}, - ApachedruidSysTcpv4RetransSegs: MetricConfig{Enabled: false}, - ApachedruidSysUptime: MetricConfig{Enabled: false}, - ApachedruidTaskActionBatchAttempts: MetricConfig{Enabled: false}, - ApachedruidTaskActionBatchQueueTime: MetricConfig{Enabled: false}, - ApachedruidTaskActionBatchRunTime: MetricConfig{Enabled: false}, - ApachedruidTaskActionBatchSize: MetricConfig{Enabled: false}, - ApachedruidTaskActionFailedCount: MetricConfig{Enabled: false}, - ApachedruidTaskActionLogTime: MetricConfig{Enabled: false}, - ApachedruidTaskActionRunTime: MetricConfig{Enabled: false}, - ApachedruidTaskActionSuccessCount: MetricConfig{Enabled: false}, - ApachedruidTaskFailedCount: MetricConfig{Enabled: false}, - ApachedruidTaskPendingCount: MetricConfig{Enabled: false}, - ApachedruidTaskPendingTime: MetricConfig{Enabled: false}, - ApachedruidTaskRunTime: MetricConfig{Enabled: false}, - ApachedruidTaskRunningCount: MetricConfig{Enabled: false}, - ApachedruidTaskSegmentAvailabilityWaitTime: MetricConfig{Enabled: false}, - ApachedruidTaskSuccessCount: MetricConfig{Enabled: false}, - ApachedruidTaskWaitingCount: MetricConfig{Enabled: false}, - ApachedruidTaskSlotBlacklistedCount: MetricConfig{Enabled: false}, - ApachedruidTaskSlotIdleCount: MetricConfig{Enabled: false}, - ApachedruidTaskSlotLazyCount: MetricConfig{Enabled: false}, - ApachedruidTaskSlotTotalCount: MetricConfig{Enabled: false}, - ApachedruidTaskSlotUsedCount: MetricConfig{Enabled: false}, - ApachedruidTierHistoricalCount: MetricConfig{Enabled: false}, - ApachedruidTierReplicationFactor: MetricConfig{Enabled: false}, - ApachedruidTierRequiredCapacity: MetricConfig{Enabled: false}, - ApachedruidTierTotalCapacity: MetricConfig{Enabled: false}, - ApachedruidWorkerTaskFailedCount: MetricConfig{Enabled: false}, - ApachedruidWorkerTaskSuccessCount: MetricConfig{Enabled: false}, - ApachedruidWorkerTaskSlotIdleCount: MetricConfig{Enabled: false}, - ApachedruidWorkerTaskSlotTotalCount: MetricConfig{Enabled: false}, - ApachedruidWorkerTaskSlotUsedCount: MetricConfig{Enabled: false}, - ApachedruidZkConnected: MetricConfig{Enabled: 
false},
-					ApachedruidZkReconnectTime: MetricConfig{Enabled: false},
-				},
-				ResourceAttributes: ResourceAttributesConfig{
-					ApachedruidClusterName: ResourceAttributeConfig{Enabled: false},
-					ApachedruidNodeHost:    ResourceAttributeConfig{Enabled: false},
-					ApachedruidNodeService: ResourceAttributeConfig{Enabled: false},
-				},
-			},
-		},
-	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			cfg := loadMetricsBuilderConfig(t, tt.name)
-			if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(MetricConfig{}, ResourceAttributeConfig{})); diff != "" {
-				t.Errorf("Config mismatch (-expected +actual):\n%s", diff)
-			}
-		})
-	}
-}
-
-func loadMetricsBuilderConfig(t *testing.T, name string) MetricsBuilderConfig {
-	cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config.yaml"))
-	require.NoError(t, err)
-	sub, err := cm.Sub(name)
-	require.NoError(t, err)
-	cfg := DefaultMetricsBuilderConfig()
-	require.NoError(t, component.UnmarshalConfig(sub, &cfg))
-	return cfg
-}
-
-func TestResourceAttributesConfig(t *testing.T) {
-	tests := []struct {
-		name string
-		want ResourceAttributesConfig
-	}{
-		{
-			name: "default",
-			want: DefaultResourceAttributesConfig(),
-		},
-		{
-			name: "all_set",
-			want: ResourceAttributesConfig{
-				ApachedruidClusterName: ResourceAttributeConfig{Enabled: true},
-				ApachedruidNodeHost:    ResourceAttributeConfig{Enabled: true},
-				ApachedruidNodeService: ResourceAttributeConfig{Enabled: true},
-			},
-		},
-		{
-			name: "none_set",
-			want: ResourceAttributesConfig{
-				ApachedruidClusterName: ResourceAttributeConfig{Enabled: false},
-				ApachedruidNodeHost:    ResourceAttributeConfig{Enabled: false},
-				ApachedruidNodeService: ResourceAttributeConfig{Enabled: false},
-			},
-		},
-	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			cfg := loadResourceAttributesConfig(t, tt.name)
-			if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(ResourceAttributeConfig{})); diff != "" {
-				t.Errorf("Config mismatch (-expected +actual):\n%s", diff)
-			}
-		})
-	}
-}
-
-func loadResourceAttributesConfig(t *testing.T, name string) ResourceAttributesConfig {
-	cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config.yaml"))
-	require.NoError(t, err)
-	sub, err := cm.Sub(name)
-	require.NoError(t, err)
-	sub, err = sub.Sub("resource_attributes")
-	require.NoError(t, err)
-	cfg := DefaultResourceAttributesConfig()
-	require.NoError(t, component.UnmarshalConfig(sub, &cfg))
-	return cfg
-}
diff --git a/receiver/apachedruidreceiver/internal/metadata/generated_metrics.go b/receiver/apachedruidreceiver/internal/metadata/generated_metrics.go
deleted file mode 100644
index f28e9095cf8d6..0000000000000
--- a/receiver/apachedruidreceiver/internal/metadata/generated_metrics.go
+++ /dev/null
@@ -1,14859 +0,0 @@
-// Code generated by mdatagen. DO NOT EDIT.
-
-package metadata
-
-import (
-	"time"
-
-	"go.opentelemetry.io/collector/component"
-	"go.opentelemetry.io/collector/pdata/pcommon"
-	"go.opentelemetry.io/collector/pdata/pmetric"
-	"go.opentelemetry.io/collector/receiver"
-)
-
-type metricApachedruidCompactSegmentAnalyzerFetchAndProcessMillis struct {
-	data     pmetric.Metric // data buffer for generated metric.
-	config   MetricConfig   // metric config provided by user.
-	capacity int            // max observed number of data points added to the metric.
-}
-
-// init fills apachedruid.compact.segment_analyzer.fetch_and_process_millis metric with initial data.
-func (m *metricApachedruidCompactSegmentAnalyzerFetchAndProcessMillis) init() {
-	m.data.SetName("apachedruid.compact.segment_analyzer.fetch_and_process_millis")
-	m.data.SetDescription("Time taken to fetch and process segments to infer the schema for the compaction task to run.")
-	m.data.SetUnit("1")
-	m.data.SetEmptyGauge()
-	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
-}
-
-func (m *metricApachedruidCompactSegmentAnalyzerFetchAndProcessMillis) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, compactTaskTypeAttributeValue string, compactDataSourceAttributeValue string, compactGroupIDAttributeValue string, compactTagsAttributeValue string, compactTaskIDAttributeValue string) {
-	if !m.config.Enabled {
-		return
-	}
-	dp := m.data.Gauge().DataPoints().AppendEmpty()
-	dp.SetStartTimestamp(start)
-	dp.SetTimestamp(ts)
-	dp.SetIntValue(val)
-	dp.Attributes().PutStr("task_type", compactTaskTypeAttributeValue)
-	dp.Attributes().PutStr("data_source", compactDataSourceAttributeValue)
-	dp.Attributes().PutStr("group_id", compactGroupIDAttributeValue)
-	dp.Attributes().PutStr("tags", compactTagsAttributeValue)
-	dp.Attributes().PutStr("task_id", compactTaskIDAttributeValue)
-}
-
-// updateCapacity saves max length of data point slices that will be used for the slice capacity.
-func (m *metricApachedruidCompactSegmentAnalyzerFetchAndProcessMillis) updateCapacity() {
-	if m.data.Gauge().DataPoints().Len() > m.capacity {
-		m.capacity = m.data.Gauge().DataPoints().Len()
-	}
-}
-
-// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricApachedruidCompactSegmentAnalyzerFetchAndProcessMillis) emit(metrics pmetric.MetricSlice) {
-	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
-		m.updateCapacity()
-		m.data.MoveTo(metrics.AppendEmpty())
-		m.init()
-	}
-}
-
-func newMetricApachedruidCompactSegmentAnalyzerFetchAndProcessMillis(cfg MetricConfig) metricApachedruidCompactSegmentAnalyzerFetchAndProcessMillis {
-	m := metricApachedruidCompactSegmentAnalyzerFetchAndProcessMillis{config: cfg}
-	if cfg.Enabled {
-		m.data = pmetric.NewMetric()
-		m.init()
-	}
-	return m
-}
-
-type metricApachedruidCompactTaskCount struct {
-	data     pmetric.Metric // data buffer for generated metric.
-	config   MetricConfig   // metric config provided by user.
-	capacity int            // max observed number of data points added to the metric.
-}
-
-// init fills apachedruid.compact.task.count metric with initial data.
-func (m *metricApachedruidCompactTaskCount) init() {
-	m.data.SetName("apachedruid.compact.task.count")
-	m.data.SetDescription("Number of tasks issued in the auto compaction run.")
-	m.data.SetUnit("{tasks}")
-	m.data.SetEmptySum()
-	m.data.Sum().SetIsMonotonic(true)
-	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta)
-}
-
-func (m *metricApachedruidCompactTaskCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
-	if !m.config.Enabled {
-		return
-	}
-	dp := m.data.Sum().DataPoints().AppendEmpty()
-	dp.SetStartTimestamp(start)
-	dp.SetTimestamp(ts)
-	dp.SetIntValue(val)
-}
-
-// updateCapacity saves max length of data point slices that will be used for the slice capacity.
-func (m *metricApachedruidCompactTaskCount) updateCapacity() {
-	if m.data.Sum().DataPoints().Len() > m.capacity {
-		m.capacity = m.data.Sum().DataPoints().Len()
-	}
-}
-
-// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricApachedruidCompactTaskCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidCompactTaskCount(cfg MetricConfig) metricApachedruidCompactTaskCount { - m := metricApachedruidCompactTaskCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidCompactTaskAvailableSlotCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.compact_task.available_slot.count metric with initial data. -func (m *metricApachedruidCompactTaskAvailableSlotCount) init() { - m.data.SetName("apachedruid.compact_task.available_slot.count") - m.data.SetDescription("Number of available task slots that can be used for auto compaction tasks in the auto compaction run. This is the max number of task slots minus any currently running compaction tasks.") - m.data.SetUnit("{slots}") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidCompactTaskAvailableSlotCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidCompactTaskAvailableSlotCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidCompactTaskAvailableSlotCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidCompactTaskAvailableSlotCount(cfg MetricConfig) metricApachedruidCompactTaskAvailableSlotCount { - m := metricApachedruidCompactTaskAvailableSlotCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidCompactTaskMaxSlotCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.compact_task.max_slot.count metric with initial data. -func (m *metricApachedruidCompactTaskMaxSlotCount) init() { - m.data.SetName("apachedruid.compact_task.max_slot.count") - m.data.SetDescription("Maximum number of task slots available for auto compaction tasks in the auto compaction run.") - m.data.SetUnit("{slots}") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidCompactTaskMaxSlotCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricApachedruidCompactTaskMaxSlotCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidCompactTaskMaxSlotCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidCompactTaskMaxSlotCount(cfg MetricConfig) metricApachedruidCompactTaskMaxSlotCount { - m := metricApachedruidCompactTaskMaxSlotCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidCoordinatorGlobalTime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.coordinator.global.time metric with initial data. -func (m *metricApachedruidCoordinatorGlobalTime) init() { - m.data.SetName("apachedruid.coordinator.global.time") - m.data.SetDescription("Approximate runtime of a full coordination cycle in milliseconds. The `dutyGroup` dimension indicates what type of coordination this run was. For example, Historical Management or Indexing.") - m.data.SetUnit("ms") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidCoordinatorGlobalTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, coordinatorDutyGroupAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("duty_group", coordinatorDutyGroupAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidCoordinatorGlobalTime) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidCoordinatorGlobalTime) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidCoordinatorGlobalTime(cfg MetricConfig) metricApachedruidCoordinatorGlobalTime { - m := metricApachedruidCoordinatorGlobalTime{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidCoordinatorTime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.coordinator.time metric with initial data. 
-func (m *metricApachedruidCoordinatorTime) init() { - m.data.SetName("apachedruid.coordinator.time") - m.data.SetDescription("Approximate Coordinator duty runtime in milliseconds.") - m.data.SetUnit("ms") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidCoordinatorTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, coordinatorDutyAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("duty", coordinatorDutyAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidCoordinatorTime) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidCoordinatorTime) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidCoordinatorTime(cfg MetricConfig) metricApachedruidCoordinatorTime { - m := metricApachedruidCoordinatorTime{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestBytesReceived struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.bytes.received metric with initial data. -func (m *metricApachedruidIngestBytesReceived) init() { - m.data.SetName("apachedruid.ingest.bytes.received") - m.data.SetDescription("Number of bytes received by the `EventReceiverFirehose`.") - m.data.SetUnit("By") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestBytesReceived) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestTaskIDAttributeValue string, ingestDataSourceAttributeValue string, ingestServiceNameAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) - dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) - dp.Attributes().PutStr("service_name", ingestServiceNameAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIngestBytesReceived) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidIngestBytesReceived) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestBytesReceived(cfg MetricConfig) metricApachedruidIngestBytesReceived { - m := metricApachedruidIngestBytesReceived{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.count metric with initial data. -func (m *metricApachedruidIngestCount) init() { - m.data.SetName("apachedruid.ingest.count") - m.data.SetDescription("Count of `1` every time an ingestion job runs (includes compaction jobs). Aggregate using dimensions.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string, ingestTaskIngestionModeAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) - dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) - dp.Attributes().PutStr("tags", ingestTagsAttributeValue) - dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) - dp.Attributes().PutStr("task_ingestion_mode", ingestTaskIngestionModeAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIngestCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestCount(cfg MetricConfig) metricApachedruidIngestCount { - m := metricApachedruidIngestCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestEventsBuffered struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.events.buffered metric with initial data. 
-func (m *metricApachedruidIngestEventsBuffered) init() { - m.data.SetName("apachedruid.ingest.events.buffered") - m.data.SetDescription("Number of events queued in the `EventReceiverFirehose` buffer.") - m.data.SetUnit("{events}") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestEventsBuffered) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestServiceNameAttributeValue string, ingestBufferCapacityAttributeValue string, ingestTaskIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) - dp.Attributes().PutStr("service_name", ingestServiceNameAttributeValue) - dp.Attributes().PutStr("buffer_capacity", ingestBufferCapacityAttributeValue) - dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIngestEventsBuffered) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestEventsBuffered) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestEventsBuffered(cfg MetricConfig) metricApachedruidIngestEventsBuffered { - m := metricApachedruidIngestEventsBuffered{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestEventsDuplicate struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.events.duplicate metric with initial data. 
-func (m *metricApachedruidIngestEventsDuplicate) init() { - m.data.SetName("apachedruid.ingest.events.duplicate") - m.data.SetDescription("Number of events rejected because the events are duplicated.") - m.data.SetUnit("{events}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestEventsDuplicate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) - dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) - dp.Attributes().PutStr("tags", ingestTagsAttributeValue) - dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIngestEventsDuplicate) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestEventsDuplicate) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestEventsDuplicate(cfg MetricConfig) metricApachedruidIngestEventsDuplicate { - m := metricApachedruidIngestEventsDuplicate{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestEventsMessageGap struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.events.message_gap metric with initial data. -func (m *metricApachedruidIngestEventsMessageGap) init() { - m.data.SetName("apachedruid.ingest.events.message_gap") - m.data.SetDescription("Time gap in milliseconds between the latest ingested event timestamp and the current system timestamp of metrics emission. If the value is increasing but lag is low, Druid may not be receiving new data. 
This metric is reset as new tasks spawn up.") - m.data.SetUnit("ms") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestEventsMessageGap) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) - dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) - dp.Attributes().PutStr("tags", ingestTagsAttributeValue) - dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIngestEventsMessageGap) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestEventsMessageGap) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestEventsMessageGap(cfg MetricConfig) metricApachedruidIngestEventsMessageGap { - m := metricApachedruidIngestEventsMessageGap{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestEventsProcessed struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.events.processed metric with initial data. -func (m *metricApachedruidIngestEventsProcessed) init() { - m.data.SetName("apachedruid.ingest.events.processed") - m.data.SetDescription("Number of events processed per emission period.") - m.data.SetUnit("{events}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestEventsProcessed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) - dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) - dp.Attributes().PutStr("tags", ingestTagsAttributeValue) - dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricApachedruidIngestEventsProcessed) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestEventsProcessed) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestEventsProcessed(cfg MetricConfig) metricApachedruidIngestEventsProcessed { - m := metricApachedruidIngestEventsProcessed{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestEventsProcessedWithError struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.events.processed_with_error metric with initial data. -func (m *metricApachedruidIngestEventsProcessedWithError) init() { - m.data.SetName("apachedruid.ingest.events.processed_with_error") - m.data.SetDescription("Number of events processed with some partial errors per emission period. Events processed with partial errors are counted towards both this metric and `ingest/events/processed`.") - m.data.SetUnit("{events}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestEventsProcessedWithError) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) - dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) - dp.Attributes().PutStr("tags", ingestTagsAttributeValue) - dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIngestEventsProcessedWithError) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestEventsProcessedWithError) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestEventsProcessedWithError(cfg MetricConfig) metricApachedruidIngestEventsProcessedWithError { - m := metricApachedruidIngestEventsProcessedWithError{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestEventsThrownAway struct { - data pmetric.Metric // data buffer for generated metric. 
- config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.events.thrown_away metric with initial data. -func (m *metricApachedruidIngestEventsThrownAway) init() { - m.data.SetName("apachedruid.ingest.events.thrown_away") - m.data.SetDescription("Number of events rejected because they are null, or filtered by `transformSpec`, or outside one of `lateMessageRejectionPeriod`, `earlyMessageRejectionPeriod`, or `windowPeriod`.") - m.data.SetUnit("{events}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestEventsThrownAway) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) - dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) - dp.Attributes().PutStr("tags", ingestTagsAttributeValue) - dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIngestEventsThrownAway) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestEventsThrownAway) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestEventsThrownAway(cfg MetricConfig) metricApachedruidIngestEventsThrownAway { - m := metricApachedruidIngestEventsThrownAway{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestEventsUnparseable struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.events.unparseable metric with initial data. 
-func (m *metricApachedruidIngestEventsUnparseable) init() { - m.data.SetName("apachedruid.ingest.events.unparseable") - m.data.SetDescription("Number of events rejected because the events are unparseable.") - m.data.SetUnit("{events}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestEventsUnparseable) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) - dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) - dp.Attributes().PutStr("tags", ingestTagsAttributeValue) - dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIngestEventsUnparseable) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestEventsUnparseable) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestEventsUnparseable(cfg MetricConfig) metricApachedruidIngestEventsUnparseable { - m := metricApachedruidIngestEventsUnparseable{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestHandoffCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.handoff.count metric with initial data. 
-func (m *metricApachedruidIngestHandoffCount) init() { - m.data.SetName("apachedruid.ingest.handoff.count") - m.data.SetDescription("Number of handoffs that happened.") - m.data.SetUnit("{handoffs}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestHandoffCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) - dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) - dp.Attributes().PutStr("tags", ingestTagsAttributeValue) - dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIngestHandoffCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestHandoffCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestHandoffCount(cfg MetricConfig) metricApachedruidIngestHandoffCount { - m := metricApachedruidIngestHandoffCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestHandoffFailed struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.handoff.failed metric with initial data. 
-func (m *metricApachedruidIngestHandoffFailed) init() { - m.data.SetName("apachedruid.ingest.handoff.failed") - m.data.SetDescription("Number of handoffs that failed.") - m.data.SetUnit("{handoffs}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestHandoffFailed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) - dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) - dp.Attributes().PutStr("tags", ingestTagsAttributeValue) - dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIngestHandoffFailed) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestHandoffFailed) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestHandoffFailed(cfg MetricConfig) metricApachedruidIngestHandoffFailed { - m := metricApachedruidIngestHandoffFailed{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestHandoffTime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.handoff.time metric with initial data. 
-func (m *metricApachedruidIngestHandoffTime) init() { - m.data.SetName("apachedruid.ingest.handoff.time") - m.data.SetDescription("Total number of milliseconds taken to handoff a set of segments.") - m.data.SetUnit("ms") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestHandoffTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) - dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) - dp.Attributes().PutStr("tags", ingestTagsAttributeValue) - dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIngestHandoffTime) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestHandoffTime) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestHandoffTime(cfg MetricConfig) metricApachedruidIngestHandoffTime { - m := metricApachedruidIngestHandoffTime{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestInputBytes struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.input.bytes metric with initial data. -func (m *metricApachedruidIngestInputBytes) init() { - m.data.SetName("apachedruid.ingest.input.bytes") - m.data.SetDescription("Number of bytes read from input sources, after decompression but prior to parsing. This covers all data read, including data that does not end up being fully processed and ingested. 
For example, this includes data that ends up being rejected for being unparseable or filtered out.") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestInputBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) - dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) - dp.Attributes().PutStr("tags", ingestTagsAttributeValue) - dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIngestInputBytes) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestInputBytes) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestInputBytes(cfg MetricConfig) metricApachedruidIngestInputBytes { - m := metricApachedruidIngestInputBytes{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestKafkaAvgLag struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.kafka.avg_lag metric with initial data. -func (m *metricApachedruidIngestKafkaAvgLag) init() { - m.data.SetName("apachedruid.ingest.kafka.avg_lag") - m.data.SetDescription("Average lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers across all partitions. Minimum emission period for this metric is a minute.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestKafkaAvgLag) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestStreamAttributeValue string, ingestDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("tags", ingestTagsAttributeValue) - dp.Attributes().PutStr("stream", ingestStreamAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricApachedruidIngestKafkaAvgLag) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestKafkaAvgLag) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestKafkaAvgLag(cfg MetricConfig) metricApachedruidIngestKafkaAvgLag { - m := metricApachedruidIngestKafkaAvgLag{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestKafkaLag struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.kafka.lag metric with initial data. -func (m *metricApachedruidIngestKafkaLag) init() { - m.data.SetName("apachedruid.ingest.kafka.lag") - m.data.SetDescription("Total lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers across all partitions. Minimum emission period for this metric is a minute.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestKafkaLag) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestStreamAttributeValue string, ingestDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("tags", ingestTagsAttributeValue) - dp.Attributes().PutStr("stream", ingestStreamAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIngestKafkaLag) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestKafkaLag) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestKafkaLag(cfg MetricConfig) metricApachedruidIngestKafkaLag { - m := metricApachedruidIngestKafkaLag{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestKafkaMaxLag struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.kafka.max_lag metric with initial data. -func (m *metricApachedruidIngestKafkaMaxLag) init() { - m.data.SetName("apachedruid.ingest.kafka.max_lag") - m.data.SetDescription("Max lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers across all partitions. 
Minimum emission period for this metric is a minute.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestKafkaMaxLag) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestStreamAttributeValue string, ingestDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("tags", ingestTagsAttributeValue) - dp.Attributes().PutStr("stream", ingestStreamAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIngestKafkaMaxLag) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestKafkaMaxLag) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestKafkaMaxLag(cfg MetricConfig) metricApachedruidIngestKafkaMaxLag { - m := metricApachedruidIngestKafkaMaxLag{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestKafkaPartitionLag struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.kafka.partition_lag metric with initial data. -func (m *metricApachedruidIngestKafkaPartitionLag) init() { - m.data.SetName("apachedruid.ingest.kafka.partition_lag") - m.data.SetDescription("Partition-wise lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers. Minimum emission period for this metric is a minute.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestKafkaPartitionLag) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestPartitionAttributeValue string, ingestStreamAttributeValue string, ingestDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("tags", ingestTagsAttributeValue) - dp.Attributes().PutStr("partition", ingestPartitionAttributeValue) - dp.Attributes().PutStr("stream", ingestStreamAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIngestKafkaPartitionLag) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidIngestKafkaPartitionLag) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestKafkaPartitionLag(cfg MetricConfig) metricApachedruidIngestKafkaPartitionLag { - m := metricApachedruidIngestKafkaPartitionLag{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestKinesisAvgLagTime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.kinesis.avg_lag.time metric with initial data. -func (m *metricApachedruidIngestKinesisAvgLagTime) init() { - m.data.SetName("apachedruid.ingest.kinesis.avg_lag.time") - m.data.SetDescription("Average lag time in milliseconds between the current message sequence number consumed by the Kinesis indexing tasks and latest sequence number in Kinesis across all shards. Minimum emission period for this metric is a minute.") - m.data.SetUnit("ms") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestKinesisAvgLagTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestStreamAttributeValue string, ingestDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("tags", ingestTagsAttributeValue) - dp.Attributes().PutStr("stream", ingestStreamAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIngestKinesisAvgLagTime) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestKinesisAvgLagTime) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestKinesisAvgLagTime(cfg MetricConfig) metricApachedruidIngestKinesisAvgLagTime { - m := metricApachedruidIngestKinesisAvgLagTime{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestKinesisLagTime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.kinesis.lag.time metric with initial data. -func (m *metricApachedruidIngestKinesisLagTime) init() { - m.data.SetName("apachedruid.ingest.kinesis.lag.time") - m.data.SetDescription("Total lag time in milliseconds between the current message sequence number consumed by the Kinesis indexing tasks and latest sequence number in Kinesis across all shards. 
Minimum emission period for this metric is a minute.") - m.data.SetUnit("ms") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestKinesisLagTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestStreamAttributeValue string, ingestDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("tags", ingestTagsAttributeValue) - dp.Attributes().PutStr("stream", ingestStreamAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIngestKinesisLagTime) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestKinesisLagTime) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestKinesisLagTime(cfg MetricConfig) metricApachedruidIngestKinesisLagTime { - m := metricApachedruidIngestKinesisLagTime{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestKinesisMaxLagTime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.kinesis.max_lag.time metric with initial data. -func (m *metricApachedruidIngestKinesisMaxLagTime) init() { - m.data.SetName("apachedruid.ingest.kinesis.max_lag.time") - m.data.SetDescription("Max lag time in milliseconds between the current message sequence number consumed by the Kinesis indexing tasks and latest sequence number in Kinesis across all shards. Minimum emission period for this metric is a minute.") - m.data.SetUnit("ms") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestKinesisMaxLagTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestStreamAttributeValue string, ingestDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("tags", ingestTagsAttributeValue) - dp.Attributes().PutStr("stream", ingestStreamAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIngestKinesisMaxLagTime) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidIngestKinesisMaxLagTime) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestKinesisMaxLagTime(cfg MetricConfig) metricApachedruidIngestKinesisMaxLagTime { - m := metricApachedruidIngestKinesisMaxLagTime{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestKinesisPartitionLagTime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.kinesis.partition_lag.time metric with initial data. -func (m *metricApachedruidIngestKinesisPartitionLagTime) init() { - m.data.SetName("apachedruid.ingest.kinesis.partition_lag.time") - m.data.SetDescription("Partition-wise lag time in milliseconds between the current message sequence number consumed by the Kinesis indexing tasks and latest sequence number in Kinesis. Minimum emission period for this metric is a minute.") - m.data.SetUnit("ms") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestKinesisPartitionLagTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestPartitionAttributeValue string, ingestStreamAttributeValue string, ingestDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("tags", ingestTagsAttributeValue) - dp.Attributes().PutStr("partition", ingestPartitionAttributeValue) - dp.Attributes().PutStr("stream", ingestStreamAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIngestKinesisPartitionLagTime) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestKinesisPartitionLagTime) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestKinesisPartitionLagTime(cfg MetricConfig) metricApachedruidIngestKinesisPartitionLagTime { - m := metricApachedruidIngestKinesisPartitionLagTime{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestMergeCPU struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.merge.cpu metric with initial data. 
-func (m *metricApachedruidIngestMergeCPU) init() { - m.data.SetName("apachedruid.ingest.merge.cpu") - m.data.SetDescription("CPU time in Nanoseconds spent on merging intermediate segments.") - m.data.SetUnit("ns") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestMergeCPU) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) - dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) - dp.Attributes().PutStr("tags", ingestTagsAttributeValue) - dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIngestMergeCPU) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestMergeCPU) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestMergeCPU(cfg MetricConfig) metricApachedruidIngestMergeCPU { - m := metricApachedruidIngestMergeCPU{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestMergeTime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.merge.time metric with initial data. 
-func (m *metricApachedruidIngestMergeTime) init() { - m.data.SetName("apachedruid.ingest.merge.time") - m.data.SetDescription("Milliseconds spent merging intermediate segments.") - m.data.SetUnit("ms") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestMergeTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) - dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) - dp.Attributes().PutStr("tags", ingestTagsAttributeValue) - dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIngestMergeTime) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestMergeTime) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestMergeTime(cfg MetricConfig) metricApachedruidIngestMergeTime { - m := metricApachedruidIngestMergeTime{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestNoticesQueueSize struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.notices.queue_size metric with initial data. -func (m *metricApachedruidIngestNoticesQueueSize) init() { - m.data.SetName("apachedruid.ingest.notices.queue_size") - m.data.SetDescription("Number of pending notices to be processed by the coordinator.") - m.data.SetUnit("{notices}") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestNoticesQueueSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("tags", ingestTagsAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricApachedruidIngestNoticesQueueSize) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestNoticesQueueSize) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestNoticesQueueSize(cfg MetricConfig) metricApachedruidIngestNoticesQueueSize { - m := metricApachedruidIngestNoticesQueueSize{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestNoticesTime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.notices.time metric with initial data. -func (m *metricApachedruidIngestNoticesTime) init() { - m.data.SetName("apachedruid.ingest.notices.time") - m.data.SetDescription("Milliseconds taken to process a notice by the supervisor.") - m.data.SetUnit("ms") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestNoticesTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("tags", ingestTagsAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIngestNoticesTime) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestNoticesTime) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestNoticesTime(cfg MetricConfig) metricApachedruidIngestNoticesTime { - m := metricApachedruidIngestNoticesTime{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestPauseTime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.pause.time metric with initial data. 
-func (m *metricApachedruidIngestPauseTime) init() { - m.data.SetName("apachedruid.ingest.pause.time") - m.data.SetDescription("Milliseconds spent by a task in a paused state without ingesting.") - m.data.SetUnit("ms") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestPauseTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string, ingestDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("tags", ingestTagsAttributeValue) - dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIngestPauseTime) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestPauseTime) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestPauseTime(cfg MetricConfig) metricApachedruidIngestPauseTime { - m := metricApachedruidIngestPauseTime{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestPersistsBackPressure struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.persists.back_pressure metric with initial data. -func (m *metricApachedruidIngestPersistsBackPressure) init() { - m.data.SetName("apachedruid.ingest.persists.back_pressure") - m.data.SetDescription("Milliseconds spent creating persist tasks and blocking waiting for them to finish.") - m.data.SetUnit("ms") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestPersistsBackPressure) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) - dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) - dp.Attributes().PutStr("tags", ingestTagsAttributeValue) - dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricApachedruidIngestPersistsBackPressure) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestPersistsBackPressure) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestPersistsBackPressure(cfg MetricConfig) metricApachedruidIngestPersistsBackPressure { - m := metricApachedruidIngestPersistsBackPressure{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestPersistsCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.persists.count metric with initial data. -func (m *metricApachedruidIngestPersistsCount) init() { - m.data.SetName("apachedruid.ingest.persists.count") - m.data.SetDescription("Number of times persist occurred.") - m.data.SetUnit("1") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestPersistsCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) - dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) - dp.Attributes().PutStr("tags", ingestTagsAttributeValue) - dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIngestPersistsCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestPersistsCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestPersistsCount(cfg MetricConfig) metricApachedruidIngestPersistsCount { - m := metricApachedruidIngestPersistsCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestPersistsCPU struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.persists.cpu metric with initial data. 
-func (m *metricApachedruidIngestPersistsCPU) init() { - m.data.SetName("apachedruid.ingest.persists.cpu") - m.data.SetDescription("CPU time in nanoseconds spent on doing intermediate persist.") - m.data.SetUnit("ns") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestPersistsCPU) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) - dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) - dp.Attributes().PutStr("tags", ingestTagsAttributeValue) - dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIngestPersistsCPU) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestPersistsCPU) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestPersistsCPU(cfg MetricConfig) metricApachedruidIngestPersistsCPU { - m := metricApachedruidIngestPersistsCPU{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestPersistsFailed struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.persists.failed metric with initial data. 
-func (m *metricApachedruidIngestPersistsFailed) init() { - m.data.SetName("apachedruid.ingest.persists.failed") - m.data.SetDescription("Number of persists that failed.") - m.data.SetUnit("{persists}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestPersistsFailed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) - dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) - dp.Attributes().PutStr("tags", ingestTagsAttributeValue) - dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIngestPersistsFailed) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestPersistsFailed) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestPersistsFailed(cfg MetricConfig) metricApachedruidIngestPersistsFailed { - m := metricApachedruidIngestPersistsFailed{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestPersistsTime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.persists.time metric with initial data. 
-func (m *metricApachedruidIngestPersistsTime) init() { - m.data.SetName("apachedruid.ingest.persists.time") - m.data.SetDescription("Milliseconds spent doing intermediate persist.") - m.data.SetUnit("ms") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestPersistsTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) - dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) - dp.Attributes().PutStr("tags", ingestTagsAttributeValue) - dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIngestPersistsTime) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestPersistsTime) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestPersistsTime(cfg MetricConfig) metricApachedruidIngestPersistsTime { - m := metricApachedruidIngestPersistsTime{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestRowsOutput struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.rows.output metric with initial data. 
-func (m *metricApachedruidIngestRowsOutput) init() { - m.data.SetName("apachedruid.ingest.rows.output") - m.data.SetDescription("Number of Druid rows persisted.") - m.data.SetUnit("{rows}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestRowsOutput) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestTaskIDAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) - dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) - dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIngestRowsOutput) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestRowsOutput) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestRowsOutput(cfg MetricConfig) metricApachedruidIngestRowsOutput { - m := metricApachedruidIngestRowsOutput{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestSegmentsCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.segments.count metric with initial data. 
-func (m *metricApachedruidIngestSegmentsCount) init() { - m.data.SetName("apachedruid.ingest.segments.count") - m.data.SetDescription("Count of final segments created by job (includes tombstones).") - m.data.SetUnit("1") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestSegmentsCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string, ingestTaskIngestionModeAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) - dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) - dp.Attributes().PutStr("tags", ingestTagsAttributeValue) - dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) - dp.Attributes().PutStr("task_ingestion_mode", ingestTaskIngestionModeAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIngestSegmentsCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestSegmentsCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestSegmentsCount(cfg MetricConfig) metricApachedruidIngestSegmentsCount { - m := metricApachedruidIngestSegmentsCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestShuffleBytes struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.shuffle.bytes metric with initial data. -func (m *metricApachedruidIngestShuffleBytes) init() { - m.data.SetName("apachedruid.ingest.shuffle.bytes") - m.data.SetDescription("Number of bytes shuffled per emission period.") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestShuffleBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestSupervisorTaskIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("supervisor_task_id", ingestSupervisorTaskIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricApachedruidIngestShuffleBytes) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestShuffleBytes) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestShuffleBytes(cfg MetricConfig) metricApachedruidIngestShuffleBytes { - m := metricApachedruidIngestShuffleBytes{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestShuffleRequests struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.shuffle.requests metric with initial data. -func (m *metricApachedruidIngestShuffleRequests) init() { - m.data.SetName("apachedruid.ingest.shuffle.requests") - m.data.SetDescription("Number of shuffle requests per emission period.") - m.data.SetUnit("{requests}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestShuffleRequests) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestSupervisorTaskIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("supervisor_task_id", ingestSupervisorTaskIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIngestShuffleRequests) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestShuffleRequests) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestShuffleRequests(cfg MetricConfig) metricApachedruidIngestShuffleRequests { - m := metricApachedruidIngestShuffleRequests{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestSinkCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.sink.count metric with initial data. 
-func (m *metricApachedruidIngestSinkCount) init() { - m.data.SetName("apachedruid.ingest.sink.count") - m.data.SetDescription("Number of sinks not handed off.") - m.data.SetUnit("{sinks}") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestSinkCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) - dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) - dp.Attributes().PutStr("tags", ingestTagsAttributeValue) - dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIngestSinkCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestSinkCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestSinkCount(cfg MetricConfig) metricApachedruidIngestSinkCount { - m := metricApachedruidIngestSinkCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIngestTombstonesCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.ingest.tombstones.count metric with initial data. 
-func (m *metricApachedruidIngestTombstonesCount) init() { - m.data.SetName("apachedruid.ingest.tombstones.count") - m.data.SetDescription("Count of tombstones created by job.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIngestTombstonesCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string, ingestTaskIngestionModeAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", ingestTaskTypeAttributeValue) - dp.Attributes().PutStr("data_source", ingestDataSourceAttributeValue) - dp.Attributes().PutStr("group_id", ingestGroupIDAttributeValue) - dp.Attributes().PutStr("tags", ingestTagsAttributeValue) - dp.Attributes().PutStr("task_id", ingestTaskIDAttributeValue) - dp.Attributes().PutStr("task_ingestion_mode", ingestTaskIngestionModeAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIngestTombstonesCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIngestTombstonesCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIngestTombstonesCount(cfg MetricConfig) metricApachedruidIngestTombstonesCount { - m := metricApachedruidIngestTombstonesCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIntervalCompactedCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.interval.compacted.count metric with initial data. -func (m *metricApachedruidIntervalCompactedCount) init() { - m.data.SetName("apachedruid.interval.compacted.count") - m.data.SetDescription("Total number of intervals of this datasource that are already compacted with the spec set in the auto compaction config.") - m.data.SetUnit("{intervals}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIntervalCompactedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, intervalDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("data_source", intervalDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricApachedruidIntervalCompactedCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIntervalCompactedCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIntervalCompactedCount(cfg MetricConfig) metricApachedruidIntervalCompactedCount { - m := metricApachedruidIntervalCompactedCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIntervalSkipCompactCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.interval.skip_compact.count metric with initial data. -func (m *metricApachedruidIntervalSkipCompactCount) init() { - m.data.SetName("apachedruid.interval.skip_compact.count") - m.data.SetDescription("Total number of intervals of this datasource that are skipped (not eligible for auto compaction) by the auto compaction.") - m.data.SetUnit("{intervals}") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIntervalSkipCompactCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, intervalDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("data_source", intervalDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIntervalSkipCompactCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIntervalSkipCompactCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIntervalSkipCompactCount(cfg MetricConfig) metricApachedruidIntervalSkipCompactCount { - m := metricApachedruidIntervalSkipCompactCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidIntervalWaitCompactCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.interval.wait_compact.count metric with initial data. 
-func (m *metricApachedruidIntervalWaitCompactCount) init() { - m.data.SetName("apachedruid.interval.wait_compact.count") - m.data.SetDescription("Total number of intervals of this datasource waiting to be compacted by the auto compaction (only consider intervals/segments that are eligible for auto compaction).") - m.data.SetUnit("{intervals}") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidIntervalWaitCompactCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, intervalDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("data_source", intervalDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidIntervalWaitCompactCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidIntervalWaitCompactCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidIntervalWaitCompactCount(cfg MetricConfig) metricApachedruidIntervalWaitCompactCount { - m := metricApachedruidIntervalWaitCompactCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidJettyNumOpenConnections struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.jetty.num_open_connections metric with initial data. -func (m *metricApachedruidJettyNumOpenConnections) init() { - m.data.SetName("apachedruid.jetty.num_open_connections") - m.data.SetDescription("Number of open jetty connections.") - m.data.SetUnit("{connections}") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidJettyNumOpenConnections) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidJettyNumOpenConnections) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidJettyNumOpenConnections) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidJettyNumOpenConnections(cfg MetricConfig) metricApachedruidJettyNumOpenConnections { - m := metricApachedruidJettyNumOpenConnections{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidJettyThreadPoolBusy struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.jetty.thread_pool.busy metric with initial data. -func (m *metricApachedruidJettyThreadPoolBusy) init() { - m.data.SetName("apachedruid.jetty.thread_pool.busy") - m.data.SetDescription("Number of busy threads that has work to do from the worker queue.") - m.data.SetUnit("{threads}") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidJettyThreadPoolBusy) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidJettyThreadPoolBusy) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidJettyThreadPoolBusy) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidJettyThreadPoolBusy(cfg MetricConfig) metricApachedruidJettyThreadPoolBusy { - m := metricApachedruidJettyThreadPoolBusy{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidJettyThreadPoolIdle struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.jetty.thread_pool.idle metric with initial data. -func (m *metricApachedruidJettyThreadPoolIdle) init() { - m.data.SetName("apachedruid.jetty.thread_pool.idle") - m.data.SetDescription("Number of idle threads.") - m.data.SetUnit("{threads}") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidJettyThreadPoolIdle) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidJettyThreadPoolIdle) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidJettyThreadPoolIdle) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidJettyThreadPoolIdle(cfg MetricConfig) metricApachedruidJettyThreadPoolIdle { - m := metricApachedruidJettyThreadPoolIdle{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidJettyThreadPoolIsLowOnThreads struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.jetty.thread_pool.is_low_on_threads metric with initial data. -func (m *metricApachedruidJettyThreadPoolIsLowOnThreads) init() { - m.data.SetName("apachedruid.jetty.thread_pool.is_low_on_threads") - m.data.SetDescription("A rough indicator of whether number of total workable threads allocated is enough to handle the works in the work queue.") - m.data.SetUnit("{threads}") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidJettyThreadPoolIsLowOnThreads) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidJettyThreadPoolIsLowOnThreads) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidJettyThreadPoolIsLowOnThreads) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidJettyThreadPoolIsLowOnThreads(cfg MetricConfig) metricApachedruidJettyThreadPoolIsLowOnThreads { - m := metricApachedruidJettyThreadPoolIsLowOnThreads{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidJettyThreadPoolMax struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.jetty.thread_pool.max metric with initial data. -func (m *metricApachedruidJettyThreadPoolMax) init() { - m.data.SetName("apachedruid.jetty.thread_pool.max") - m.data.SetDescription("Number of maximum threads allocatable.") - m.data.SetUnit("{threads}") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidJettyThreadPoolMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricApachedruidJettyThreadPoolMax) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidJettyThreadPoolMax) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidJettyThreadPoolMax(cfg MetricConfig) metricApachedruidJettyThreadPoolMax { - m := metricApachedruidJettyThreadPoolMax{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidJettyThreadPoolMin struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.jetty.thread_pool.min metric with initial data. -func (m *metricApachedruidJettyThreadPoolMin) init() { - m.data.SetName("apachedruid.jetty.thread_pool.min") - m.data.SetDescription("Number of minimum threads allocatable.") - m.data.SetUnit("{threads}") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidJettyThreadPoolMin) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidJettyThreadPoolMin) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidJettyThreadPoolMin) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidJettyThreadPoolMin(cfg MetricConfig) metricApachedruidJettyThreadPoolMin { - m := metricApachedruidJettyThreadPoolMin{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidJettyThreadPoolQueueSize struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.jetty.thread_pool.queue_size metric with initial data. -func (m *metricApachedruidJettyThreadPoolQueueSize) init() { - m.data.SetName("apachedruid.jetty.thread_pool.queue_size") - m.data.SetDescription("Size of the worker queue.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidJettyThreadPoolQueueSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricApachedruidJettyThreadPoolQueueSize) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidJettyThreadPoolQueueSize) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidJettyThreadPoolQueueSize(cfg MetricConfig) metricApachedruidJettyThreadPoolQueueSize { - m := metricApachedruidJettyThreadPoolQueueSize{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidJettyThreadPoolTotal struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.jetty.thread_pool.total metric with initial data. -func (m *metricApachedruidJettyThreadPoolTotal) init() { - m.data.SetName("apachedruid.jetty.thread_pool.total") - m.data.SetDescription("Number of total workable threads allocated.") - m.data.SetUnit("{threads}") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidJettyThreadPoolTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidJettyThreadPoolTotal) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidJettyThreadPoolTotal) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidJettyThreadPoolTotal(cfg MetricConfig) metricApachedruidJettyThreadPoolTotal { - m := metricApachedruidJettyThreadPoolTotal{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidJvmBufferpoolCapacity struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.jvm.bufferpool.capacity metric with initial data. 
-func (m *metricApachedruidJvmBufferpoolCapacity) init() { - m.data.SetName("apachedruid.jvm.bufferpool.capacity") - m.data.SetDescription("Bufferpool capacity.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidJvmBufferpoolCapacity) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, jvmBufferpoolNameAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("bufferpool_name", jvmBufferpoolNameAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidJvmBufferpoolCapacity) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidJvmBufferpoolCapacity) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidJvmBufferpoolCapacity(cfg MetricConfig) metricApachedruidJvmBufferpoolCapacity { - m := metricApachedruidJvmBufferpoolCapacity{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidJvmBufferpoolCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.jvm.bufferpool.count metric with initial data. -func (m *metricApachedruidJvmBufferpoolCount) init() { - m.data.SetName("apachedruid.jvm.bufferpool.count") - m.data.SetDescription("Bufferpool count.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidJvmBufferpoolCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, jvmBufferpoolNameAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("bufferpool_name", jvmBufferpoolNameAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidJvmBufferpoolCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidJvmBufferpoolCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidJvmBufferpoolCount(cfg MetricConfig) metricApachedruidJvmBufferpoolCount { - m := metricApachedruidJvmBufferpoolCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidJvmBufferpoolUsed struct { - data pmetric.Metric // data buffer for generated metric. 
- config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.jvm.bufferpool.used metric with initial data. -func (m *metricApachedruidJvmBufferpoolUsed) init() { - m.data.SetName("apachedruid.jvm.bufferpool.used") - m.data.SetDescription("Bufferpool used.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidJvmBufferpoolUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, jvmBufferpoolNameAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("bufferpool_name", jvmBufferpoolNameAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidJvmBufferpoolUsed) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidJvmBufferpoolUsed) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidJvmBufferpoolUsed(cfg MetricConfig) metricApachedruidJvmBufferpoolUsed { - m := metricApachedruidJvmBufferpoolUsed{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidJvmGcCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.jvm.gc.count metric with initial data. -func (m *metricApachedruidJvmGcCount) init() { - m.data.SetName("apachedruid.jvm.gc.count") - m.data.SetDescription("Garbage collection count.") - m.data.SetUnit("1") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidJvmGcCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, jvmGcGenAttributeValue string, jvmGcNameAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("gc_gen", jvmGcGenAttributeValue) - dp.Attributes().PutStr("gc_name", jvmGcNameAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidJvmGcCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidJvmGcCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidJvmGcCount(cfg MetricConfig) metricApachedruidJvmGcCount { - m := metricApachedruidJvmGcCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidJvmGcCPU struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.jvm.gc.cpu metric with initial data. -func (m *metricApachedruidJvmGcCPU) init() { - m.data.SetName("apachedruid.jvm.gc.cpu") - m.data.SetDescription("Count of CPU time in Nanoseconds spent on garbage collection. Note, `jvm/gc/cpu` represents the total time over multiple GC cycles; divide by `jvm/gc/count` to get the mean GC time per cycle.") - m.data.SetUnit("ns") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidJvmGcCPU) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, jvmGcGenAttributeValue string, jvmGcNameAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("gc_gen", jvmGcGenAttributeValue) - dp.Attributes().PutStr("gc_name", jvmGcNameAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidJvmGcCPU) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidJvmGcCPU) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidJvmGcCPU(cfg MetricConfig) metricApachedruidJvmGcCPU { - m := metricApachedruidJvmGcCPU{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidJvmMemCommitted struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.jvm.mem.committed metric with initial data. 
-func (m *metricApachedruidJvmMemCommitted) init() { - m.data.SetName("apachedruid.jvm.mem.committed") - m.data.SetDescription("Committed memory.") - m.data.SetUnit("By") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidJvmMemCommitted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, jvmMemKindAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("mem_kind", jvmMemKindAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidJvmMemCommitted) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidJvmMemCommitted) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidJvmMemCommitted(cfg MetricConfig) metricApachedruidJvmMemCommitted { - m := metricApachedruidJvmMemCommitted{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidJvmMemInit struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.jvm.mem.init metric with initial data. -func (m *metricApachedruidJvmMemInit) init() { - m.data.SetName("apachedruid.jvm.mem.init") - m.data.SetDescription("Initial memory.") - m.data.SetUnit("By") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidJvmMemInit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, jvmMemKindAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("mem_kind", jvmMemKindAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidJvmMemInit) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidJvmMemInit) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidJvmMemInit(cfg MetricConfig) metricApachedruidJvmMemInit { - m := metricApachedruidJvmMemInit{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidJvmMemMax struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.jvm.mem.max metric with initial data. 
-func (m *metricApachedruidJvmMemMax) init() { - m.data.SetName("apachedruid.jvm.mem.max") - m.data.SetDescription("Max memory.") - m.data.SetUnit("By") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidJvmMemMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, jvmMemKindAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("mem_kind", jvmMemKindAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidJvmMemMax) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidJvmMemMax) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidJvmMemMax(cfg MetricConfig) metricApachedruidJvmMemMax { - m := metricApachedruidJvmMemMax{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidJvmMemUsed struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.jvm.mem.used metric with initial data. -func (m *metricApachedruidJvmMemUsed) init() { - m.data.SetName("apachedruid.jvm.mem.used") - m.data.SetDescription("Used memory.") - m.data.SetUnit("By") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidJvmMemUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, jvmMemKindAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("mem_kind", jvmMemKindAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidJvmMemUsed) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidJvmMemUsed) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidJvmMemUsed(cfg MetricConfig) metricApachedruidJvmMemUsed { - m := metricApachedruidJvmMemUsed{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidJvmPoolCommitted struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.jvm.pool.committed metric with initial data. 
-func (m *metricApachedruidJvmPoolCommitted) init() { - m.data.SetName("apachedruid.jvm.pool.committed") - m.data.SetDescription("Committed pool.") - m.data.SetUnit("By") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidJvmPoolCommitted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, jvmPoolNameAttributeValue string, jvmPoolKindAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("pool_name", jvmPoolNameAttributeValue) - dp.Attributes().PutStr("pool_kind", jvmPoolKindAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidJvmPoolCommitted) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidJvmPoolCommitted) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidJvmPoolCommitted(cfg MetricConfig) metricApachedruidJvmPoolCommitted { - m := metricApachedruidJvmPoolCommitted{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidJvmPoolInit struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.jvm.pool.init metric with initial data. -func (m *metricApachedruidJvmPoolInit) init() { - m.data.SetName("apachedruid.jvm.pool.init") - m.data.SetDescription("Initial pool.") - m.data.SetUnit("By") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidJvmPoolInit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, jvmPoolNameAttributeValue string, jvmPoolKindAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("pool_name", jvmPoolNameAttributeValue) - dp.Attributes().PutStr("pool_kind", jvmPoolKindAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidJvmPoolInit) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidJvmPoolInit) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidJvmPoolInit(cfg MetricConfig) metricApachedruidJvmPoolInit { - m := metricApachedruidJvmPoolInit{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidJvmPoolMax struct { - data pmetric.Metric // data buffer for generated metric. 
- config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.jvm.pool.max metric with initial data. -func (m *metricApachedruidJvmPoolMax) init() { - m.data.SetName("apachedruid.jvm.pool.max") - m.data.SetDescription("Max pool.") - m.data.SetUnit("By") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidJvmPoolMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, jvmPoolNameAttributeValue string, jvmPoolKindAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("pool_name", jvmPoolNameAttributeValue) - dp.Attributes().PutStr("pool_kind", jvmPoolKindAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidJvmPoolMax) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidJvmPoolMax) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidJvmPoolMax(cfg MetricConfig) metricApachedruidJvmPoolMax { - m := metricApachedruidJvmPoolMax{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidJvmPoolUsed struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.jvm.pool.used metric with initial data. -func (m *metricApachedruidJvmPoolUsed) init() { - m.data.SetName("apachedruid.jvm.pool.used") - m.data.SetDescription("Pool used.") - m.data.SetUnit("By") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidJvmPoolUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, jvmPoolNameAttributeValue string, jvmPoolKindAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("pool_name", jvmPoolNameAttributeValue) - dp.Attributes().PutStr("pool_kind", jvmPoolKindAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidJvmPoolUsed) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidJvmPoolUsed) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidJvmPoolUsed(cfg MetricConfig) metricApachedruidJvmPoolUsed { - m := metricApachedruidJvmPoolUsed{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidKillPendingSegmentsCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.kill.pending_segments.count metric with initial data. -func (m *metricApachedruidKillPendingSegmentsCount) init() { - m.data.SetName("apachedruid.kill.pending_segments.count") - m.data.SetDescription("Number of stale pending segments deleted from the metadata store.") - m.data.SetUnit("{segments}") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidKillPendingSegmentsCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, killDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("data_source", killDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidKillPendingSegmentsCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidKillPendingSegmentsCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidKillPendingSegmentsCount(cfg MetricConfig) metricApachedruidKillPendingSegmentsCount { - m := metricApachedruidKillPendingSegmentsCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidKillTaskCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.kill.task.count metric with initial data. -func (m *metricApachedruidKillTaskCount) init() { - m.data.SetName("apachedruid.kill.task.count") - m.data.SetDescription("Number of tasks issued in the auto kill run.") - m.data.SetUnit("{tasks}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) -} - -func (m *metricApachedruidKillTaskCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricApachedruidKillTaskCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidKillTaskCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidKillTaskCount(cfg MetricConfig) metricApachedruidKillTaskCount { - m := metricApachedruidKillTaskCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidKillTaskAvailableSlotCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.kill_task.available_slot.count metric with initial data. -func (m *metricApachedruidKillTaskAvailableSlotCount) init() { - m.data.SetName("apachedruid.kill_task.available_slot.count") - m.data.SetDescription("Number of available task slots that can be used for auto kill tasks in the auto kill run. This is the max number of task slots minus any currently running auto kill tasks.") - m.data.SetUnit("{slots}") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidKillTaskAvailableSlotCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidKillTaskAvailableSlotCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidKillTaskAvailableSlotCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidKillTaskAvailableSlotCount(cfg MetricConfig) metricApachedruidKillTaskAvailableSlotCount { - m := metricApachedruidKillTaskAvailableSlotCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidKillTaskMaxSlotCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.kill_task.max_slot.count metric with initial data. 
-func (m *metricApachedruidKillTaskMaxSlotCount) init() { - m.data.SetName("apachedruid.kill_task.max_slot.count") - m.data.SetDescription("Maximum number of task slots available for auto kill tasks in the auto kill run.") - m.data.SetUnit("{slots}") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidKillTaskMaxSlotCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidKillTaskMaxSlotCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidKillTaskMaxSlotCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidKillTaskMaxSlotCount(cfg MetricConfig) metricApachedruidKillTaskMaxSlotCount { - m := metricApachedruidKillTaskMaxSlotCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidMergeBufferPendingRequests struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.merge_buffer.pending_requests metric with initial data. -func (m *metricApachedruidMergeBufferPendingRequests) init() { - m.data.SetName("apachedruid.merge_buffer.pending_requests") - m.data.SetDescription("Number of requests waiting to acquire a batch of buffers from the merge buffer pool.") - m.data.SetUnit("{requests}") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidMergeBufferPendingRequests) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidMergeBufferPendingRequests) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidMergeBufferPendingRequests) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidMergeBufferPendingRequests(cfg MetricConfig) metricApachedruidMergeBufferPendingRequests { - m := metricApachedruidMergeBufferPendingRequests{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidMetadataKillAuditCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. 
-} - -// init fills apachedruid.metadata.kill.audit.count metric with initial data. -func (m *metricApachedruidMetadataKillAuditCount) init() { - m.data.SetName("apachedruid.metadata.kill.audit.count") - m.data.SetDescription("Total number of audit logs that were automatically deleted from metadata store per each Coordinator kill audit duty run. This metric can help adjust `druid.coordinator.kill.audit.durationToRetain` configuration based on whether more or less audit logs need to be deleted per cycle. This metric is emitted only when `druid.coordinator.kill.audit.on` is set to true.") - m.data.SetUnit("1") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) -} - -func (m *metricApachedruidMetadataKillAuditCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidMetadataKillAuditCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidMetadataKillAuditCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidMetadataKillAuditCount(cfg MetricConfig) metricApachedruidMetadataKillAuditCount { - m := metricApachedruidMetadataKillAuditCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidMetadataKillCompactionCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.metadata.kill.compaction.count metric with initial data. -func (m *metricApachedruidMetadataKillCompactionCount) init() { - m.data.SetName("apachedruid.metadata.kill.compaction.count") - m.data.SetDescription("Total number of compaction configurations that were automatically deleted from metadata store per each Coordinator kill compaction configuration duty run. This metric is only emitted when `druid.coordinator.kill.compaction.on` is set to true.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidMetadataKillCompactionCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidMetadataKillCompactionCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidMetadataKillCompactionCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidMetadataKillCompactionCount(cfg MetricConfig) metricApachedruidMetadataKillCompactionCount { - m := metricApachedruidMetadataKillCompactionCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidMetadataKillDatasourceCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.metadata.kill.datasource.count metric with initial data. -func (m *metricApachedruidMetadataKillDatasourceCount) init() { - m.data.SetName("apachedruid.metadata.kill.datasource.count") - m.data.SetDescription("Total number of datasource metadata that were automatically deleted from metadata store per each Coordinator kill datasource duty run. Note that datasource metadata only exists for datasource created from supervisor. This metric can help adjust `druid.coordinator.kill.datasource.durationToRetain` configuration based on whether more or less datasource metadata need to be deleted per cycle. This metric is only emitted when `druid.coordinator.kill.datasource.on` is set to true.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidMetadataKillDatasourceCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidMetadataKillDatasourceCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidMetadataKillDatasourceCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidMetadataKillDatasourceCount(cfg MetricConfig) metricApachedruidMetadataKillDatasourceCount { - m := metricApachedruidMetadataKillDatasourceCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidMetadataKillRuleCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.metadata.kill.rule.count metric with initial data. -func (m *metricApachedruidMetadataKillRuleCount) init() { - m.data.SetName("apachedruid.metadata.kill.rule.count") - m.data.SetDescription("Total number of rules that were automatically deleted from metadata store per each Coordinator kill rule duty run. This metric can help adjust `druid.coordinator.kill.rule.durationToRetain` configuration based on whether more or less rules need to be deleted per cycle. This metric is only emitted when `druid.coordinator.kill.rule.on` is set to true.")
- m.data.SetUnit("{rules}") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidMetadataKillRuleCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidMetadataKillRuleCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidMetadataKillRuleCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidMetadataKillRuleCount(cfg MetricConfig) metricApachedruidMetadataKillRuleCount { - m := metricApachedruidMetadataKillRuleCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidMetadataKillSupervisorCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.metadata.kill.supervisor.count metric with initial data. -func (m *metricApachedruidMetadataKillSupervisorCount) init() { - m.data.SetName("apachedruid.metadata.kill.supervisor.count") - m.data.SetDescription("Total number of terminated supervisors that were automatically deleted from metadata store per each Coordinator kill supervisor duty run. This metric can help adjust `druid.coordinator.kill.supervisor.durationToRetain` configuration based on whether more or less terminated supervisors need to be deleted per cycle. This metric is only emitted when `druid.coordinator.kill.supervisor.on` is set to true.") - m.data.SetUnit("{supervisors}") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidMetadataKillSupervisorCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidMetadataKillSupervisorCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricApachedruidMetadataKillSupervisorCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidMetadataKillSupervisorCount(cfg MetricConfig) metricApachedruidMetadataKillSupervisorCount { - m := metricApachedruidMetadataKillSupervisorCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidMetadatacacheInitTime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.metadatacache.init.time metric with initial data. -func (m *metricApachedruidMetadatacacheInitTime) init() { - m.data.SetName("apachedruid.metadatacache.init.time") - m.data.SetDescription("Time taken to initialize the broker segment metadata cache. Useful to detect if brokers are taking too long to start.") - m.data.SetUnit("ms") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidMetadatacacheInitTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidMetadatacacheInitTime) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidMetadatacacheInitTime) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidMetadatacacheInitTime(cfg MetricConfig) metricApachedruidMetadatacacheInitTime { - m := metricApachedruidMetadatacacheInitTime{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidMetadatacacheRefreshCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.metadatacache.refresh.count metric with initial data. -func (m *metricApachedruidMetadatacacheRefreshCount) init() { - m.data.SetName("apachedruid.metadatacache.refresh.count") - m.data.SetDescription("Number of segments to refresh in broker segment metadata cache.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidMetadatacacheRefreshCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricApachedruidMetadatacacheRefreshCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidMetadatacacheRefreshCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidMetadatacacheRefreshCount(cfg MetricConfig) metricApachedruidMetadatacacheRefreshCount { - m := metricApachedruidMetadatacacheRefreshCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidMetadatacacheRefreshTime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.metadatacache.refresh.time metric with initial data. -func (m *metricApachedruidMetadatacacheRefreshTime) init() { - m.data.SetName("apachedruid.metadatacache.refresh.time") - m.data.SetDescription("Time taken to refresh segments in broker segment metadata cache.") - m.data.SetUnit("ms") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidMetadatacacheRefreshTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidMetadatacacheRefreshTime) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidMetadatacacheRefreshTime) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidMetadatacacheRefreshTime(cfg MetricConfig) metricApachedruidMetadatacacheRefreshTime { - m := metricApachedruidMetadatacacheRefreshTime{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryByteLimitExceededCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.byte_limit.exceeded.count metric with initial data. 
-func (m *metricApachedruidQueryByteLimitExceededCount) init() { - m.data.SetName("apachedruid.query.byte_limit.exceeded.count") - m.data.SetDescription("Number of queries whose inlined subquery results exceeded the given byte limit.") - m.data.SetUnit("{queries}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) -} - -func (m *metricApachedruidQueryByteLimitExceededCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryByteLimitExceededCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryByteLimitExceededCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryByteLimitExceededCount(cfg MetricConfig) metricApachedruidQueryByteLimitExceededCount { - m := metricApachedruidQueryByteLimitExceededCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryBytes struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.bytes metric with initial data. -func (m *metricApachedruidQueryBytes) init() { - m.data.SetName("apachedruid.query.bytes") - m.data.SetDescription("The total number of bytes returned to the requesting client in the query response from the broker. Other services report the total bytes for their portion of the query.")
- m.data.SetUnit("By") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidQueryBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, queryDataSourceAttributeValue string, queryNumMetricsAttributeValue string, queryDimensionAttributeValue string, queryHasFiltersAttributeValue string, queryThresholdAttributeValue int64, queryNumComplexMetricsAttributeValue int64, queryTypeAttributeValue string, queryRemoteAddressAttributeValue string, queryIDAttributeValue string, queryContextAttributeValue string, queryNumDimensionsAttributeValue string, queryIntervalAttributeValue string, queryDurationAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("data_source", queryDataSourceAttributeValue) - dp.Attributes().PutStr("num_metrics", queryNumMetricsAttributeValue) - dp.Attributes().PutStr("dimension", queryDimensionAttributeValue) - dp.Attributes().PutStr("has_filters", queryHasFiltersAttributeValue) - dp.Attributes().PutInt("threshold", queryThresholdAttributeValue) - dp.Attributes().PutInt("num_complex_metrics", queryNumComplexMetricsAttributeValue) - dp.Attributes().PutStr("type", queryTypeAttributeValue) - dp.Attributes().PutStr("remote_address", queryRemoteAddressAttributeValue) - dp.Attributes().PutStr("id", queryIDAttributeValue) - dp.Attributes().PutStr("context", queryContextAttributeValue) - dp.Attributes().PutStr("num_dimensions", queryNumDimensionsAttributeValue) - dp.Attributes().PutStr("interval", queryIntervalAttributeValue) - dp.Attributes().PutStr("duration", queryDurationAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryBytes) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryBytes) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryBytes(cfg MetricConfig) metricApachedruidQueryBytes { - m := metricApachedruidQueryBytes{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryCacheDeltaAverageBytes struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.cache.delta.average_bytes metric with initial data.
-func (m *metricApachedruidQueryCacheDeltaAverageBytes) init() { - m.data.SetName("apachedruid.query.cache.delta.average_bytes") - m.data.SetDescription("Average cache entry byte size.") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) -} - -func (m *metricApachedruidQueryCacheDeltaAverageBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryCacheDeltaAverageBytes) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryCacheDeltaAverageBytes) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryCacheDeltaAverageBytes(cfg MetricConfig) metricApachedruidQueryCacheDeltaAverageBytes { - m := metricApachedruidQueryCacheDeltaAverageBytes{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryCacheDeltaErrors struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.cache.delta.errors metric with initial data. -func (m *metricApachedruidQueryCacheDeltaErrors) init() { - m.data.SetName("apachedruid.query.cache.delta.errors") - m.data.SetDescription("Number of cache errors.") - m.data.SetUnit("{errors}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) -} - -func (m *metricApachedruidQueryCacheDeltaErrors) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryCacheDeltaErrors) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryCacheDeltaErrors) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryCacheDeltaErrors(cfg MetricConfig) metricApachedruidQueryCacheDeltaErrors { - m := metricApachedruidQueryCacheDeltaErrors{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryCacheDeltaEvictions struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. 
- capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.cache.delta.evictions metric with initial data. -func (m *metricApachedruidQueryCacheDeltaEvictions) init() { - m.data.SetName("apachedruid.query.cache.delta.evictions") - m.data.SetDescription("Number of cache evictions.") - m.data.SetUnit("{evictions}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) -} - -func (m *metricApachedruidQueryCacheDeltaEvictions) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryCacheDeltaEvictions) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryCacheDeltaEvictions) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryCacheDeltaEvictions(cfg MetricConfig) metricApachedruidQueryCacheDeltaEvictions { - m := metricApachedruidQueryCacheDeltaEvictions{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryCacheDeltaHitRate struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.cache.delta.hit_rate metric with initial data. -func (m *metricApachedruidQueryCacheDeltaHitRate) init() { - m.data.SetName("apachedruid.query.cache.delta.hit_rate") - m.data.SetDescription("Cache hit rate.") - m.data.SetUnit("1") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) -} - -func (m *metricApachedruidQueryCacheDeltaHitRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetDoubleValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryCacheDeltaHitRate) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidQueryCacheDeltaHitRate) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryCacheDeltaHitRate(cfg MetricConfig) metricApachedruidQueryCacheDeltaHitRate { - m := metricApachedruidQueryCacheDeltaHitRate{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryCacheDeltaHits struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.cache.delta.hits metric with initial data. -func (m *metricApachedruidQueryCacheDeltaHits) init() { - m.data.SetName("apachedruid.query.cache.delta.hits") - m.data.SetDescription("Number of cache hits.") - m.data.SetUnit("{hits}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) -} - -func (m *metricApachedruidQueryCacheDeltaHits) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryCacheDeltaHits) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryCacheDeltaHits) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryCacheDeltaHits(cfg MetricConfig) metricApachedruidQueryCacheDeltaHits { - m := metricApachedruidQueryCacheDeltaHits{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryCacheDeltaMisses struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.cache.delta.misses metric with initial data. -func (m *metricApachedruidQueryCacheDeltaMisses) init() { - m.data.SetName("apachedruid.query.cache.delta.misses") - m.data.SetDescription("Number of cache misses.") - m.data.SetUnit("{misses}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) -} - -func (m *metricApachedruidQueryCacheDeltaMisses) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricApachedruidQueryCacheDeltaMisses) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryCacheDeltaMisses) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryCacheDeltaMisses(cfg MetricConfig) metricApachedruidQueryCacheDeltaMisses { - m := metricApachedruidQueryCacheDeltaMisses{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryCacheDeltaNumEntries struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.cache.delta.num_entries metric with initial data. -func (m *metricApachedruidQueryCacheDeltaNumEntries) init() { - m.data.SetName("apachedruid.query.cache.delta.num_entries") - m.data.SetDescription("Number of cache entries.") - m.data.SetUnit("{entries}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) -} - -func (m *metricApachedruidQueryCacheDeltaNumEntries) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryCacheDeltaNumEntries) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryCacheDeltaNumEntries) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryCacheDeltaNumEntries(cfg MetricConfig) metricApachedruidQueryCacheDeltaNumEntries { - m := metricApachedruidQueryCacheDeltaNumEntries{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryCacheDeltaPutError struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.cache.delta.put.error metric with initial data. 
-func (m *metricApachedruidQueryCacheDeltaPutError) init() { - m.data.SetName("apachedruid.query.cache.delta.put.error") - m.data.SetDescription("Number of new cache entries that could not be cached due to errors.") - m.data.SetUnit("{errors}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) -} - -func (m *metricApachedruidQueryCacheDeltaPutError) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryCacheDeltaPutError) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryCacheDeltaPutError) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryCacheDeltaPutError(cfg MetricConfig) metricApachedruidQueryCacheDeltaPutError { - m := metricApachedruidQueryCacheDeltaPutError{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryCacheDeltaPutOk struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.cache.delta.put.ok metric with initial data. -func (m *metricApachedruidQueryCacheDeltaPutOk) init() { - m.data.SetName("apachedruid.query.cache.delta.put.ok") - m.data.SetDescription("Number of new cache entries successfully cached.") - m.data.SetUnit("1") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) -} - -func (m *metricApachedruidQueryCacheDeltaPutOk) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryCacheDeltaPutOk) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryCacheDeltaPutOk) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryCacheDeltaPutOk(cfg MetricConfig) metricApachedruidQueryCacheDeltaPutOk { - m := metricApachedruidQueryCacheDeltaPutOk{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryCacheDeltaPutOversized struct { - data pmetric.Metric // data buffer for generated metric. 
- config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.cache.delta.put.oversized metric with initial data. -func (m *metricApachedruidQueryCacheDeltaPutOversized) init() { - m.data.SetName("apachedruid.query.cache.delta.put.oversized") - m.data.SetDescription("Number of potential new cache entries that were skipped due to being too large (based on `druid.{broker,historical,realtime}.cache.maxEntrySize` properties).") - m.data.SetUnit("1") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) -} - -func (m *metricApachedruidQueryCacheDeltaPutOversized) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryCacheDeltaPutOversized) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryCacheDeltaPutOversized) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryCacheDeltaPutOversized(cfg MetricConfig) metricApachedruidQueryCacheDeltaPutOversized { - m := metricApachedruidQueryCacheDeltaPutOversized{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryCacheDeltaSizeBytes struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.cache.delta.size_bytes metric with initial data. -func (m *metricApachedruidQueryCacheDeltaSizeBytes) init() { - m.data.SetName("apachedruid.query.cache.delta.size_bytes") - m.data.SetDescription("Size in bytes of cache entries.") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) -} - -func (m *metricApachedruidQueryCacheDeltaSizeBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryCacheDeltaSizeBytes) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidQueryCacheDeltaSizeBytes) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryCacheDeltaSizeBytes(cfg MetricConfig) metricApachedruidQueryCacheDeltaSizeBytes { - m := metricApachedruidQueryCacheDeltaSizeBytes{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryCacheDeltaTimeouts struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.cache.delta.timeouts metric with initial data. -func (m *metricApachedruidQueryCacheDeltaTimeouts) init() { - m.data.SetName("apachedruid.query.cache.delta.timeouts") - m.data.SetDescription("Number of cache timeouts.") - m.data.SetUnit("{timeouts}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) -} - -func (m *metricApachedruidQueryCacheDeltaTimeouts) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryCacheDeltaTimeouts) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryCacheDeltaTimeouts) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryCacheDeltaTimeouts(cfg MetricConfig) metricApachedruidQueryCacheDeltaTimeouts { - m := metricApachedruidQueryCacheDeltaTimeouts{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryCacheMemcachedDelta struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.cache.memcached.delta metric with initial data. -func (m *metricApachedruidQueryCacheMemcachedDelta) init() { - m.data.SetName("apachedruid.query.cache.memcached.delta") - m.data.SetDescription("Cache metrics unique to memcached (only if `druid.cache.type=memcached`) as their delta from the prior event emission.") - m.data.SetUnit("1") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) -} - -func (m *metricApachedruidQueryCacheMemcachedDelta) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricApachedruidQueryCacheMemcachedDelta) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryCacheMemcachedDelta) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryCacheMemcachedDelta(cfg MetricConfig) metricApachedruidQueryCacheMemcachedDelta { - m := metricApachedruidQueryCacheMemcachedDelta{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryCacheMemcachedTotal struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.cache.memcached.total metric with initial data. -func (m *metricApachedruidQueryCacheMemcachedTotal) init() { - m.data.SetName("apachedruid.query.cache.memcached.total") - m.data.SetDescription("Cache metrics unique to memcached (only if `druid.cache.type=memcached`) as their actual values.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidQueryCacheMemcachedTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryCacheMemcachedTotal) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryCacheMemcachedTotal) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryCacheMemcachedTotal(cfg MetricConfig) metricApachedruidQueryCacheMemcachedTotal { - m := metricApachedruidQueryCacheMemcachedTotal{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryCacheTotalAverageBytes struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.cache.total.average_bytes metric with initial data. 
-func (m *metricApachedruidQueryCacheTotalAverageBytes) init() { - m.data.SetName("apachedruid.query.cache.total.average_bytes") - m.data.SetDescription("Average cache entry byte size.") - m.data.SetUnit("By") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidQueryCacheTotalAverageBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryCacheTotalAverageBytes) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryCacheTotalAverageBytes) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryCacheTotalAverageBytes(cfg MetricConfig) metricApachedruidQueryCacheTotalAverageBytes { - m := metricApachedruidQueryCacheTotalAverageBytes{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryCacheTotalErrors struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.cache.total.errors metric with initial data. -func (m *metricApachedruidQueryCacheTotalErrors) init() { - m.data.SetName("apachedruid.query.cache.total.errors") - m.data.SetDescription("Number of cache errors.") - m.data.SetUnit("{errors}") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidQueryCacheTotalErrors) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryCacheTotalErrors) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryCacheTotalErrors) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryCacheTotalErrors(cfg MetricConfig) metricApachedruidQueryCacheTotalErrors { - m := metricApachedruidQueryCacheTotalErrors{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryCacheTotalEvictions struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.cache.total.evictions metric with initial data. 
-func (m *metricApachedruidQueryCacheTotalEvictions) init() { - m.data.SetName("apachedruid.query.cache.total.evictions") - m.data.SetDescription("Number of cache evictions.") - m.data.SetUnit("{evictions}") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidQueryCacheTotalEvictions) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryCacheTotalEvictions) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryCacheTotalEvictions) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryCacheTotalEvictions(cfg MetricConfig) metricApachedruidQueryCacheTotalEvictions { - m := metricApachedruidQueryCacheTotalEvictions{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryCacheTotalHitRate struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.cache.total.hit_rate metric with initial data. -func (m *metricApachedruidQueryCacheTotalHitRate) init() { - m.data.SetName("apachedruid.query.cache.total.hit_rate") - m.data.SetDescription("Cache hit rate.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidQueryCacheTotalHitRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetDoubleValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryCacheTotalHitRate) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryCacheTotalHitRate) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryCacheTotalHitRate(cfg MetricConfig) metricApachedruidQueryCacheTotalHitRate { - m := metricApachedruidQueryCacheTotalHitRate{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryCacheTotalHits struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.cache.total.hits metric with initial data. 
-func (m *metricApachedruidQueryCacheTotalHits) init() { - m.data.SetName("apachedruid.query.cache.total.hits") - m.data.SetDescription("Number of cache hits.") - m.data.SetUnit("{hits}") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidQueryCacheTotalHits) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryCacheTotalHits) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryCacheTotalHits) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryCacheTotalHits(cfg MetricConfig) metricApachedruidQueryCacheTotalHits { - m := metricApachedruidQueryCacheTotalHits{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryCacheTotalMisses struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.cache.total.misses metric with initial data. -func (m *metricApachedruidQueryCacheTotalMisses) init() { - m.data.SetName("apachedruid.query.cache.total.misses") - m.data.SetDescription("Number of cache misses.") - m.data.SetUnit("{misses}") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidQueryCacheTotalMisses) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryCacheTotalMisses) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryCacheTotalMisses) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryCacheTotalMisses(cfg MetricConfig) metricApachedruidQueryCacheTotalMisses { - m := metricApachedruidQueryCacheTotalMisses{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryCacheTotalNumEntries struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.cache.total.num_entries metric with initial data. 
-func (m *metricApachedruidQueryCacheTotalNumEntries) init() { - m.data.SetName("apachedruid.query.cache.total.num_entries") - m.data.SetDescription("Number of cache entries.") - m.data.SetUnit("{entries}") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidQueryCacheTotalNumEntries) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryCacheTotalNumEntries) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryCacheTotalNumEntries) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryCacheTotalNumEntries(cfg MetricConfig) metricApachedruidQueryCacheTotalNumEntries { - m := metricApachedruidQueryCacheTotalNumEntries{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryCacheTotalPutError struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.cache.total.put.error metric with initial data. -func (m *metricApachedruidQueryCacheTotalPutError) init() { - m.data.SetName("apachedruid.query.cache.total.put.error") - m.data.SetDescription("Number of new cache entries that could not be cached due to errors.") - m.data.SetUnit("{errors}") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidQueryCacheTotalPutError) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryCacheTotalPutError) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryCacheTotalPutError) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryCacheTotalPutError(cfg MetricConfig) metricApachedruidQueryCacheTotalPutError { - m := metricApachedruidQueryCacheTotalPutError{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryCacheTotalPutOk struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.cache.total.put.ok metric with initial data. 
-func (m *metricApachedruidQueryCacheTotalPutOk) init() { - m.data.SetName("apachedruid.query.cache.total.put.ok") - m.data.SetDescription("Number of new cache entries successfully cached.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidQueryCacheTotalPutOk) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryCacheTotalPutOk) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryCacheTotalPutOk) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryCacheTotalPutOk(cfg MetricConfig) metricApachedruidQueryCacheTotalPutOk { - m := metricApachedruidQueryCacheTotalPutOk{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryCacheTotalPutOversized struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.cache.total.put.oversized metric with initial data. -func (m *metricApachedruidQueryCacheTotalPutOversized) init() { - m.data.SetName("apachedruid.query.cache.total.put.oversized") - m.data.SetDescription("Number of potential new cache entries that were skipped due to being too large (based on `druid.{broker,historical,realtime}.cache.maxEntrySize` properties).") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidQueryCacheTotalPutOversized) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryCacheTotalPutOversized) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryCacheTotalPutOversized) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryCacheTotalPutOversized(cfg MetricConfig) metricApachedruidQueryCacheTotalPutOversized { - m := metricApachedruidQueryCacheTotalPutOversized{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryCacheTotalSizeBytes struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. 
- capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.cache.total.size_bytes metric with initial data. -func (m *metricApachedruidQueryCacheTotalSizeBytes) init() { - m.data.SetName("apachedruid.query.cache.total.size_bytes") - m.data.SetDescription("Size in bytes of cache entries.") - m.data.SetUnit("By") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidQueryCacheTotalSizeBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryCacheTotalSizeBytes) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryCacheTotalSizeBytes) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryCacheTotalSizeBytes(cfg MetricConfig) metricApachedruidQueryCacheTotalSizeBytes { - m := metricApachedruidQueryCacheTotalSizeBytes{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryCacheTotalTimeouts struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.cache.total.timeouts metric with initial data. -func (m *metricApachedruidQueryCacheTotalTimeouts) init() { - m.data.SetName("apachedruid.query.cache.total.timeouts") - m.data.SetDescription("Number of cache timeouts.") - m.data.SetUnit("{timeouts}") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidQueryCacheTotalTimeouts) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryCacheTotalTimeouts) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryCacheTotalTimeouts) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryCacheTotalTimeouts(cfg MetricConfig) metricApachedruidQueryCacheTotalTimeouts { - m := metricApachedruidQueryCacheTotalTimeouts{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. 
- capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.count metric with initial data. -func (m *metricApachedruidQueryCount) init() { - m.data.SetName("apachedruid.query.count") - m.data.SetDescription("Number of total queries.") - m.data.SetUnit("{queries}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) -} - -func (m *metricApachedruidQueryCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryCount(cfg MetricConfig) metricApachedruidQueryCount { - m := metricApachedruidQueryCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryCPUTime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.cpu.time metric with initial data. 
-func (m *metricApachedruidQueryCPUTime) init() { - m.data.SetName("apachedruid.query.cpu.time") - m.data.SetDescription("Microseconds of CPU time taken to complete a query.") - m.data.SetUnit("us") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidQueryCPUTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, queryDataSourceAttributeValue string, queryNumMetricsAttributeValue string, queryDimensionAttributeValue string, queryHasFiltersAttributeValue string, queryThresholdAttributeValue int64, queryNumComplexMetricsAttributeValue int64, queryTypeAttributeValue string, queryRemoteAddressAttributeValue string, queryIDAttributeValue string, queryContextAttributeValue string, queryNumDimensionsAttributeValue string, queryIntervalAttributeValue string, queryDurationAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("data_source", queryDataSourceAttributeValue) - dp.Attributes().PutStr("num_metrics", queryNumMetricsAttributeValue) - dp.Attributes().PutStr("dimension", queryDimensionAttributeValue) - dp.Attributes().PutStr("has_filters", queryHasFiltersAttributeValue) - dp.Attributes().PutInt("threshold", queryThresholdAttributeValue) - dp.Attributes().PutInt("num_complex_metrics", queryNumComplexMetricsAttributeValue) - dp.Attributes().PutStr("type", queryTypeAttributeValue) - dp.Attributes().PutStr("remote_address", queryRemoteAddressAttributeValue) - dp.Attributes().PutStr("id", queryIDAttributeValue) - dp.Attributes().PutStr("context", queryContextAttributeValue) - dp.Attributes().PutStr("num_dimensions", queryNumDimensionsAttributeValue) - dp.Attributes().PutStr("interval", queryIntervalAttributeValue) - dp.Attributes().PutStr("duration", queryDurationAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryCPUTime) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryCPUTime) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryCPUTime(cfg MetricConfig) metricApachedruidQueryCPUTime { - m := metricApachedruidQueryCPUTime{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryFailedCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.failed.count metric with initial data.
-func (m *metricApachedruidQueryFailedCount) init() { - m.data.SetName("apachedruid.query.failed.count") - m.data.SetDescription("Number of failed queries.") - m.data.SetUnit("{queries}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) -} - -func (m *metricApachedruidQueryFailedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryFailedCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryFailedCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryFailedCount(cfg MetricConfig) metricApachedruidQueryFailedCount { - m := metricApachedruidQueryFailedCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryInterruptedCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.interrupted.count metric with initial data. -func (m *metricApachedruidQueryInterruptedCount) init() { - m.data.SetName("apachedruid.query.interrupted.count") - m.data.SetDescription("Number of queries interrupted due to cancellation.") - m.data.SetUnit("{queries}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) -} - -func (m *metricApachedruidQueryInterruptedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryInterruptedCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryInterruptedCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryInterruptedCount(cfg MetricConfig) metricApachedruidQueryInterruptedCount { - m := metricApachedruidQueryInterruptedCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryNodeBackpressure struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. 
-} - -// init fills apachedruid.query.node.backpressure metric with initial data. -func (m *metricApachedruidQueryNodeBackpressure) init() { - m.data.SetName("apachedruid.query.node.backpressure") - m.data.SetDescription("Milliseconds that the channel to this process has spent suspended due to backpressure.") - m.data.SetUnit("ms") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidQueryNodeBackpressure) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, queryStatusAttributeValue string, queryServerAttributeValue string, queryIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("status", queryStatusAttributeValue) - dp.Attributes().PutStr("server", queryServerAttributeValue) - dp.Attributes().PutStr("id", queryIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryNodeBackpressure) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryNodeBackpressure) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryNodeBackpressure(cfg MetricConfig) metricApachedruidQueryNodeBackpressure { - m := metricApachedruidQueryNodeBackpressure{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryNodeBytes struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.node.bytes metric with initial data. -func (m *metricApachedruidQueryNodeBytes) init() { - m.data.SetName("apachedruid.query.node.bytes") - m.data.SetDescription("Number of bytes returned from querying individual historical/realtime processes.") - m.data.SetUnit("By") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidQueryNodeBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, queryStatusAttributeValue string, queryServerAttributeValue string, queryIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("status", queryStatusAttributeValue) - dp.Attributes().PutStr("server", queryServerAttributeValue) - dp.Attributes().PutStr("id", queryIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryNodeBytes) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidQueryNodeBytes) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryNodeBytes(cfg MetricConfig) metricApachedruidQueryNodeBytes { - m := metricApachedruidQueryNodeBytes{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryNodeTime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.node.time metric with initial data. -func (m *metricApachedruidQueryNodeTime) init() { - m.data.SetName("apachedruid.query.node.time") - m.data.SetDescription("Milliseconds taken to query individual historical/realtime processes.") - m.data.SetUnit("ms") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidQueryNodeTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, queryStatusAttributeValue string, queryServerAttributeValue string, queryIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("status", queryStatusAttributeValue) - dp.Attributes().PutStr("server", queryServerAttributeValue) - dp.Attributes().PutStr("id", queryIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryNodeTime) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryNodeTime) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryNodeTime(cfg MetricConfig) metricApachedruidQueryNodeTime { - m := metricApachedruidQueryNodeTime{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryNodeTtfb struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.node.ttfb metric with initial data. -func (m *metricApachedruidQueryNodeTtfb) init() { - m.data.SetName("apachedruid.query.node.ttfb") - m.data.SetDescription("Time to first byte. 
Milliseconds elapsed until Broker starts receiving the response from individual historical/realtime processes.") - m.data.SetUnit("ms") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidQueryNodeTtfb) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, queryStatusAttributeValue string, queryServerAttributeValue string, queryIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("status", queryStatusAttributeValue) - dp.Attributes().PutStr("server", queryServerAttributeValue) - dp.Attributes().PutStr("id", queryIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryNodeTtfb) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryNodeTtfb) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryNodeTtfb(cfg MetricConfig) metricApachedruidQueryNodeTtfb { - m := metricApachedruidQueryNodeTtfb{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryPriority struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.priority metric with initial data. -func (m *metricApachedruidQueryPriority) init() { - m.data.SetName("apachedruid.query.priority") - m.data.SetDescription("Assigned lane and priority, only if Laning strategy is enabled. Refer to [Laning strategies](https://druid.apache.org/docs/latest/configuration#laning-strategies).") - m.data.SetUnit("1") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidQueryPriority) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, queryTypeAttributeValue string, queryDataSourceAttributeValue string, queryLaneAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("type", queryTypeAttributeValue) - dp.Attributes().PutStr("data_source", queryDataSourceAttributeValue) - dp.Attributes().PutStr("lane", queryLaneAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryPriority) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricApachedruidQueryPriority) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryPriority(cfg MetricConfig) metricApachedruidQueryPriority { - m := metricApachedruidQueryPriority{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryRowLimitExceededCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.row_limit.exceeded.count metric with initial data. -func (m *metricApachedruidQueryRowLimitExceededCount) init() { - m.data.SetName("apachedruid.query.row_limit.exceeded.count") - m.data.SetDescription("Number of queries whose inlined subquery results exceeded the given row limit.") - m.data.SetUnit("{queries}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) -} - -func (m *metricApachedruidQueryRowLimitExceededCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryRowLimitExceededCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryRowLimitExceededCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryRowLimitExceededCount(cfg MetricConfig) metricApachedruidQueryRowLimitExceededCount { - m := metricApachedruidQueryRowLimitExceededCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQuerySegmentTime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.segment.time metric with initial data. -func (m *metricApachedruidQuerySegmentTime) init() { - m.data.SetName("apachedruid.query.segment.time") - m.data.SetDescription("Milliseconds taken to query individual segment. 
Includes time to page in the segment from disk.") - m.data.SetUnit("ms") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidQuerySegmentTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, queryStatusAttributeValue string, querySegmentAttributeValue string, queryIDAttributeValue string, queryVectorizedAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("status", queryStatusAttributeValue) - dp.Attributes().PutStr("segment", querySegmentAttributeValue) - dp.Attributes().PutStr("id", queryIDAttributeValue) - dp.Attributes().PutStr("vectorized", queryVectorizedAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQuerySegmentTime) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQuerySegmentTime) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQuerySegmentTime(cfg MetricConfig) metricApachedruidQuerySegmentTime { - m := metricApachedruidQuerySegmentTime{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQuerySegmentAndCacheTime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.segment_and_cache.time metric with initial data. -func (m *metricApachedruidQuerySegmentAndCacheTime) init() { - m.data.SetName("apachedruid.query.segment_and_cache.time") - m.data.SetDescription("Milliseconds taken to query individual segment or hit the cache (if it is enabled on the Historical process).") - m.data.SetUnit("ms") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidQuerySegmentAndCacheTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, querySegmentAttributeValue string, queryIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("segment", querySegmentAttributeValue) - dp.Attributes().PutStr("id", queryIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQuerySegmentAndCacheTime) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidQuerySegmentAndCacheTime) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQuerySegmentAndCacheTime(cfg MetricConfig) metricApachedruidQuerySegmentAndCacheTime { - m := metricApachedruidQuerySegmentAndCacheTime{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQuerySegmentsCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.segments.count metric with initial data. -func (m *metricApachedruidQuerySegmentsCount) init() { - m.data.SetName("apachedruid.query.segments.count") - m.data.SetDescription("This metric is not enabled by default. See the `QueryMetrics` Interface for reference regarding enabling this metric. Number of segments that will be touched by the query. In the broker, it makes a plan to distribute the query to realtime tasks and historicals based on a snapshot of segment distribution state. If there are some segments moved after this snapshot is created, certain historicals and realtime tasks can report those segments as missing to the broker. The broker will resend the query to the new servers that serve those segments after move. In this case, those segments can be counted more than once in this metric.") - m.data.SetUnit("{segments}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) -} - -func (m *metricApachedruidQuerySegmentsCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQuerySegmentsCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQuerySegmentsCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQuerySegmentsCount(cfg MetricConfig) metricApachedruidQuerySegmentsCount { - m := metricApachedruidQuerySegmentsCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQuerySuccessCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.success.count metric with initial data. 
-func (m *metricApachedruidQuerySuccessCount) init() { - m.data.SetName("apachedruid.query.success.count") - m.data.SetDescription("Number of queries successfully processed.") - m.data.SetUnit("{queries}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) -} - -func (m *metricApachedruidQuerySuccessCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQuerySuccessCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQuerySuccessCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQuerySuccessCount(cfg MetricConfig) metricApachedruidQuerySuccessCount { - m := metricApachedruidQuerySuccessCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryTime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.time metric with initial data. 
-func (m *metricApachedruidQueryTime) init() { - m.data.SetName("apachedruid.query.time") - m.data.SetDescription("Milliseconds taken to complete a query.") - m.data.SetUnit("ms") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidQueryTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, queryDataSourceAttributeValue string, queryNumMetricsAttributeValue string, queryDimensionAttributeValue string, queryHasFiltersAttributeValue string, queryThresholdAttributeValue int64, queryNumComplexMetricsAttributeValue int64, queryTypeAttributeValue string, queryRemoteAddressAttributeValue string, queryIDAttributeValue string, queryContextAttributeValue string, queryNumDimensionsAttributeValue string, queryIntervalAttributeValue string, queryDurationAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("data_source", queryDataSourceAttributeValue) - dp.Attributes().PutStr("num_metrics", queryNumMetricsAttributeValue) - dp.Attributes().PutStr("dimension", queryDimensionAttributeValue) - dp.Attributes().PutStr("has_filters", queryHasFiltersAttributeValue) - dp.Attributes().PutInt("threshold", queryThresholdAttributeValue) - dp.Attributes().PutInt("num_complex_metrics", queryNumComplexMetricsAttributeValue) - dp.Attributes().PutStr("type", queryTypeAttributeValue) - dp.Attributes().PutStr("remote_address", queryRemoteAddressAttributeValue) - dp.Attributes().PutStr("id", queryIDAttributeValue) - dp.Attributes().PutStr("context", queryContextAttributeValue) - dp.Attributes().PutStr("num_dimensions", queryNumDimensionsAttributeValue) - dp.Attributes().PutStr("interval", queryIntervalAttributeValue) - dp.Attributes().PutStr("duration", queryDurationAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryTime) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryTime) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryTime(cfg MetricConfig) metricApachedruidQueryTime { - m := metricApachedruidQueryTime{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryTimeoutCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.timeout.count metric with initial data. 
-func (m *metricApachedruidQueryTimeoutCount) init() { - m.data.SetName("apachedruid.query.timeout.count") - m.data.SetDescription("Number of timed out queries.") - m.data.SetUnit("{queries}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) -} - -func (m *metricApachedruidQueryTimeoutCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryTimeoutCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryTimeoutCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryTimeoutCount(cfg MetricConfig) metricApachedruidQueryTimeoutCount { - m := metricApachedruidQueryTimeoutCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidQueryWaitTime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.query.wait.time metric with initial data. -func (m *metricApachedruidQueryWaitTime) init() { - m.data.SetName("apachedruid.query.wait.time") - m.data.SetDescription("Milliseconds spent waiting for a segment to be scanned.") - m.data.SetUnit("ms") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidQueryWaitTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, querySegmentAttributeValue string, queryIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("segment", querySegmentAttributeValue) - dp.Attributes().PutStr("id", queryIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidQueryWaitTime) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidQueryWaitTime) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidQueryWaitTime(cfg MetricConfig) metricApachedruidQueryWaitTime { - m := metricApachedruidQueryWaitTime{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentAddedBytes struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. 
- capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.added.bytes metric with initial data. -func (m *metricApachedruidSegmentAddedBytes) init() { - m.data.SetName("apachedruid.segment.added.bytes") - m.data.SetDescription("Size in bytes of new segments created.") - m.data.SetUnit("By") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSegmentAddedBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentTaskTypeAttributeValue string, segmentDataSourceAttributeValue string, segmentGroupIDAttributeValue string, segmentTagsAttributeValue string, segmentTaskIDAttributeValue string, segmentIntervalAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", segmentTaskTypeAttributeValue) - dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) - dp.Attributes().PutStr("group_id", segmentGroupIDAttributeValue) - dp.Attributes().PutStr("tags", segmentTagsAttributeValue) - dp.Attributes().PutStr("task_id", segmentTaskIDAttributeValue) - dp.Attributes().PutStr("interval", segmentIntervalAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentAddedBytes) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSegmentAddedBytes) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentAddedBytes(cfg MetricConfig) metricApachedruidSegmentAddedBytes { - m := metricApachedruidSegmentAddedBytes{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentAssignSkippedCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.assign_skipped.count metric with initial data. -func (m *metricApachedruidSegmentAssignSkippedCount) init() { - m.data.SetName("apachedruid.segment.assign_skipped.count") - m.data.SetDescription("Number of segments that could not be assigned to any server for loading. 
This can occur due to replication throttling, no available disk space, or a full load queue.") - m.data.SetUnit("{segments}") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSegmentAssignSkippedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentDescriptionAttributeValue string, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("description", segmentDescriptionAttributeValue) - dp.Attributes().PutStr("tier", segmentTierAttributeValue) - dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentAssignSkippedCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSegmentAssignSkippedCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentAssignSkippedCount(cfg MetricConfig) metricApachedruidSegmentAssignSkippedCount { - m := metricApachedruidSegmentAssignSkippedCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentAssignedCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.assigned.count metric with initial data. -func (m *metricApachedruidSegmentAssignedCount) init() { - m.data.SetName("apachedruid.segment.assigned.count") - m.data.SetDescription("Number of segments assigned to be loaded in the cluster.") - m.data.SetUnit("{segments}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSegmentAssignedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("tier", segmentTierAttributeValue) - dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentAssignedCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidSegmentAssignedCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentAssignedCount(cfg MetricConfig) metricApachedruidSegmentAssignedCount { - m := metricApachedruidSegmentAssignedCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentCompactedBytes struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.compacted.bytes metric with initial data. -func (m *metricApachedruidSegmentCompactedBytes) init() { - m.data.SetName("apachedruid.segment.compacted.bytes") - m.data.SetDescription("Total bytes of this datasource that are already compacted with the spec set in the auto compaction config.") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSegmentCompactedBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentCompactedBytes) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSegmentCompactedBytes) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentCompactedBytes(cfg MetricConfig) metricApachedruidSegmentCompactedBytes { - m := metricApachedruidSegmentCompactedBytes{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentCompactedCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.compacted.count metric with initial data. 
-func (m *metricApachedruidSegmentCompactedCount) init() { - m.data.SetName("apachedruid.segment.compacted.count") - m.data.SetDescription("Total number of segments of this datasource that are already compacted with the spec set in the auto compaction config.") - m.data.SetUnit("{segments}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSegmentCompactedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentCompactedCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSegmentCompactedCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentCompactedCount(cfg MetricConfig) metricApachedruidSegmentCompactedCount { - m := metricApachedruidSegmentCompactedCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.count metric with initial data. -func (m *metricApachedruidSegmentCount) init() { - m.data.SetName("apachedruid.segment.count") - m.data.SetDescription("Number of used segments belonging to a data source. Emitted only for data sources to which at least one used segment belongs.") - m.data.SetUnit("{segments}") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSegmentCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentPriorityAttributeValue string, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("priority", segmentPriorityAttributeValue) - dp.Attributes().PutStr("tier", segmentTierAttributeValue) - dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidSegmentCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentCount(cfg MetricConfig) metricApachedruidSegmentCount { - m := metricApachedruidSegmentCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentDeletedCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.deleted.count metric with initial data. -func (m *metricApachedruidSegmentDeletedCount) init() { - m.data.SetName("apachedruid.segment.deleted.count") - m.data.SetDescription("Number of segments marked as unused due to drop rules.") - m.data.SetUnit("{segments}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSegmentDeletedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentDeletedCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSegmentDeletedCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentDeletedCount(cfg MetricConfig) metricApachedruidSegmentDeletedCount { - m := metricApachedruidSegmentDeletedCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentDropQueueCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.drop_queue.count metric with initial data. -func (m *metricApachedruidSegmentDropQueueCount) init() { - m.data.SetName("apachedruid.segment.drop_queue.count") - m.data.SetDescription("Number of segments to drop.") - m.data.SetUnit("{segments}") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSegmentDropQueueCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentServerAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("server", segmentServerAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricApachedruidSegmentDropQueueCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSegmentDropQueueCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentDropQueueCount(cfg MetricConfig) metricApachedruidSegmentDropQueueCount { - m := metricApachedruidSegmentDropQueueCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentDropSkippedCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.drop_skipped.count metric with initial data. -func (m *metricApachedruidSegmentDropSkippedCount) init() { - m.data.SetName("apachedruid.segment.drop_skipped.count") - m.data.SetDescription("Number of segments that could not be dropped from any server.") - m.data.SetUnit("{segments}") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSegmentDropSkippedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentDescriptionAttributeValue string, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("description", segmentDescriptionAttributeValue) - dp.Attributes().PutStr("tier", segmentTierAttributeValue) - dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentDropSkippedCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSegmentDropSkippedCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentDropSkippedCount(cfg MetricConfig) metricApachedruidSegmentDropSkippedCount { - m := metricApachedruidSegmentDropSkippedCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentDroppedCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.dropped.count metric with initial data. 
-func (m *metricApachedruidSegmentDroppedCount) init() { - m.data.SetName("apachedruid.segment.dropped.count") - m.data.SetDescription("Number of segments chosen to be dropped from the cluster due to being over-replicated.") - m.data.SetUnit("{segments}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSegmentDroppedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("tier", segmentTierAttributeValue) - dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentDroppedCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSegmentDroppedCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentDroppedCount(cfg MetricConfig) metricApachedruidSegmentDroppedCount { - m := metricApachedruidSegmentDroppedCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentLoadQueueAssigned struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.load_queue.assigned metric with initial data. -func (m *metricApachedruidSegmentLoadQueueAssigned) init() { - m.data.SetName("apachedruid.segment.load_queue.assigned") - m.data.SetDescription("Number of segments assigned for load or drop to the load queue of a server.") - m.data.SetUnit("{segments}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSegmentLoadQueueAssigned) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentServerAttributeValue string, segmentDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("server", segmentServerAttributeValue) - dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentLoadQueueAssigned) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidSegmentLoadQueueAssigned) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentLoadQueueAssigned(cfg MetricConfig) metricApachedruidSegmentLoadQueueAssigned { - m := metricApachedruidSegmentLoadQueueAssigned{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentLoadQueueCancelled struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.load_queue.cancelled metric with initial data. -func (m *metricApachedruidSegmentLoadQueueCancelled) init() { - m.data.SetName("apachedruid.segment.load_queue.cancelled") - m.data.SetDescription("Number of segment assignments that were canceled before completion.") - m.data.SetUnit("1") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSegmentLoadQueueCancelled) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentServerAttributeValue string, segmentDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("server", segmentServerAttributeValue) - dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentLoadQueueCancelled) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSegmentLoadQueueCancelled) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentLoadQueueCancelled(cfg MetricConfig) metricApachedruidSegmentLoadQueueCancelled { - m := metricApachedruidSegmentLoadQueueCancelled{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentLoadQueueCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.load_queue.count metric with initial data. 
-func (m *metricApachedruidSegmentLoadQueueCount) init() { - m.data.SetName("apachedruid.segment.load_queue.count") - m.data.SetDescription("Number of segments to load.") - m.data.SetUnit("{segments}") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSegmentLoadQueueCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentServerAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("server", segmentServerAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentLoadQueueCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSegmentLoadQueueCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentLoadQueueCount(cfg MetricConfig) metricApachedruidSegmentLoadQueueCount { - m := metricApachedruidSegmentLoadQueueCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentLoadQueueFailed struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.load_queue.failed metric with initial data. -func (m *metricApachedruidSegmentLoadQueueFailed) init() { - m.data.SetName("apachedruid.segment.load_queue.failed") - m.data.SetDescription("Number of segment assignments that failed to complete.") - m.data.SetUnit("1") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSegmentLoadQueueFailed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentServerAttributeValue string, segmentDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("server", segmentServerAttributeValue) - dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentLoadQueueFailed) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidSegmentLoadQueueFailed) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentLoadQueueFailed(cfg MetricConfig) metricApachedruidSegmentLoadQueueFailed { - m := metricApachedruidSegmentLoadQueueFailed{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentLoadQueueSize struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.load_queue.size metric with initial data. -func (m *metricApachedruidSegmentLoadQueueSize) init() { - m.data.SetName("apachedruid.segment.load_queue.size") - m.data.SetDescription("Size in bytes of segments to load.") - m.data.SetUnit("By") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSegmentLoadQueueSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentServerAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("server", segmentServerAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentLoadQueueSize) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSegmentLoadQueueSize) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentLoadQueueSize(cfg MetricConfig) metricApachedruidSegmentLoadQueueSize { - m := metricApachedruidSegmentLoadQueueSize{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentLoadQueueSuccess struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.load_queue.success metric with initial data. 
-func (m *metricApachedruidSegmentLoadQueueSuccess) init() { - m.data.SetName("apachedruid.segment.load_queue.success") - m.data.SetDescription("Number of segment assignments that completed successfully.") - m.data.SetUnit("1") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSegmentLoadQueueSuccess) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentServerAttributeValue string, segmentDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("server", segmentServerAttributeValue) - dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentLoadQueueSuccess) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSegmentLoadQueueSuccess) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentLoadQueueSuccess(cfg MetricConfig) metricApachedruidSegmentLoadQueueSuccess { - m := metricApachedruidSegmentLoadQueueSuccess{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentMax struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.max metric with initial data. -func (m *metricApachedruidSegmentMax) init() { - m.data.SetName("apachedruid.segment.max") - m.data.SetDescription("Maximum byte limit available for segments.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidSegmentMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentMax) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSegmentMax) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentMax(cfg MetricConfig) metricApachedruidSegmentMax { - m := metricApachedruidSegmentMax{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentMoveSkippedCount struct { - data pmetric.Metric // data buffer for generated metric. 
- config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.move_skipped.count metric with initial data. -func (m *metricApachedruidSegmentMoveSkippedCount) init() { - m.data.SetName("apachedruid.segment.move_skipped.count") - m.data.SetDescription("Number of segments that were chosen for balancing but could not be moved. This can occur when segments are already optimally placed.") - m.data.SetUnit("{segments}") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSegmentMoveSkippedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentDescriptionAttributeValue string, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("description", segmentDescriptionAttributeValue) - dp.Attributes().PutStr("tier", segmentTierAttributeValue) - dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentMoveSkippedCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSegmentMoveSkippedCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentMoveSkippedCount(cfg MetricConfig) metricApachedruidSegmentMoveSkippedCount { - m := metricApachedruidSegmentMoveSkippedCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentMovedBytes struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.moved.bytes metric with initial data. 
-func (m *metricApachedruidSegmentMovedBytes) init() { - m.data.SetName("apachedruid.segment.moved.bytes") - m.data.SetDescription("Size in bytes of segments moved/archived via the Move Task.") - m.data.SetUnit("By") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSegmentMovedBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentTaskTypeAttributeValue string, segmentDataSourceAttributeValue string, segmentGroupIDAttributeValue string, segmentTagsAttributeValue string, segmentTaskIDAttributeValue string, segmentIntervalAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", segmentTaskTypeAttributeValue) - dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) - dp.Attributes().PutStr("group_id", segmentGroupIDAttributeValue) - dp.Attributes().PutStr("tags", segmentTagsAttributeValue) - dp.Attributes().PutStr("task_id", segmentTaskIDAttributeValue) - dp.Attributes().PutStr("interval", segmentIntervalAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentMovedBytes) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSegmentMovedBytes) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentMovedBytes(cfg MetricConfig) metricApachedruidSegmentMovedBytes { - m := metricApachedruidSegmentMovedBytes{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentMovedCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.moved.count metric with initial data. -func (m *metricApachedruidSegmentMovedCount) init() { - m.data.SetName("apachedruid.segment.moved.count") - m.data.SetDescription("Number of segments moved in the cluster.") - m.data.SetUnit("{segments}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSegmentMovedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("tier", segmentTierAttributeValue) - dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricApachedruidSegmentMovedCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSegmentMovedCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentMovedCount(cfg MetricConfig) metricApachedruidSegmentMovedCount { - m := metricApachedruidSegmentMovedCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentNukedBytes struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.nuked.bytes metric with initial data. -func (m *metricApachedruidSegmentNukedBytes) init() { - m.data.SetName("apachedruid.segment.nuked.bytes") - m.data.SetDescription("Size in bytes of segments deleted via the Kill Task.") - m.data.SetUnit("By") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSegmentNukedBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentTaskTypeAttributeValue string, segmentDataSourceAttributeValue string, segmentGroupIDAttributeValue string, segmentTagsAttributeValue string, segmentTaskIDAttributeValue string, segmentIntervalAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", segmentTaskTypeAttributeValue) - dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) - dp.Attributes().PutStr("group_id", segmentGroupIDAttributeValue) - dp.Attributes().PutStr("tags", segmentTagsAttributeValue) - dp.Attributes().PutStr("task_id", segmentTaskIDAttributeValue) - dp.Attributes().PutStr("interval", segmentIntervalAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentNukedBytes) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSegmentNukedBytes) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentNukedBytes(cfg MetricConfig) metricApachedruidSegmentNukedBytes { - m := metricApachedruidSegmentNukedBytes{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentOverShadowedCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.over_shadowed.count metric with initial data. 
-func (m *metricApachedruidSegmentOverShadowedCount) init() { - m.data.SetName("apachedruid.segment.over_shadowed.count") - m.data.SetDescription("Number of segments marked as unused due to being overshadowed.") - m.data.SetUnit("{segments}") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidSegmentOverShadowedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentOverShadowedCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSegmentOverShadowedCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentOverShadowedCount(cfg MetricConfig) metricApachedruidSegmentOverShadowedCount { - m := metricApachedruidSegmentOverShadowedCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentPendingDelete struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.pending_delete metric with initial data. -func (m *metricApachedruidSegmentPendingDelete) init() { - m.data.SetName("apachedruid.segment.pending_delete") - m.data.SetDescription("On-disk size in bytes of segments that are waiting to be cleared out.") - m.data.SetUnit("By") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidSegmentPendingDelete) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentPendingDelete) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSegmentPendingDelete) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentPendingDelete(cfg MetricConfig) metricApachedruidSegmentPendingDelete { - m := metricApachedruidSegmentPendingDelete{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentRowCountAvg struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.row_count.avg metric with initial data. 
-func (m *metricApachedruidSegmentRowCountAvg) init() { - m.data.SetName("apachedruid.segment.row_count.avg") - m.data.SetDescription("The average number of rows per segment on a historical. `SegmentStatsMonitor` must be enabled.") - m.data.SetUnit("{rows}") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSegmentRowCountAvg) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentPriorityAttributeValue string, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("priority", segmentPriorityAttributeValue) - dp.Attributes().PutStr("tier", segmentTierAttributeValue) - dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentRowCountAvg) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSegmentRowCountAvg) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentRowCountAvg(cfg MetricConfig) metricApachedruidSegmentRowCountAvg { - m := metricApachedruidSegmentRowCountAvg{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentRowCountRangeCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.row_count.range.count metric with initial data. -func (m *metricApachedruidSegmentRowCountRangeCount) init() { - m.data.SetName("apachedruid.segment.row_count.range.count") - m.data.SetDescription("The number of segments in a bucket. `SegmentStatsMonitor` must be enabled.") - m.data.SetUnit("{segments}") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSegmentRowCountRangeCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentPriorityAttributeValue string, segmentTierAttributeValue string, segmentDataSourceAttributeValue string, segmentRangeAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("priority", segmentPriorityAttributeValue) - dp.Attributes().PutStr("tier", segmentTierAttributeValue) - dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) - dp.Attributes().PutStr("range", segmentRangeAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricApachedruidSegmentRowCountRangeCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSegmentRowCountRangeCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentRowCountRangeCount(cfg MetricConfig) metricApachedruidSegmentRowCountRangeCount { - m := metricApachedruidSegmentRowCountRangeCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentScanActive struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.scan.active metric with initial data. -func (m *metricApachedruidSegmentScanActive) init() { - m.data.SetName("apachedruid.segment.scan.active") - m.data.SetDescription("Number of segments currently scanned. This metric also indicates how many threads from `druid.processing.numThreads` are currently being used.") - m.data.SetUnit("{segments}") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidSegmentScanActive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentScanActive) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSegmentScanActive) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentScanActive(cfg MetricConfig) metricApachedruidSegmentScanActive { - m := metricApachedruidSegmentScanActive{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentScanPending struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.scan.pending metric with initial data. -func (m *metricApachedruidSegmentScanPending) init() { - m.data.SetName("apachedruid.segment.scan.pending") - m.data.SetDescription("Number of segments in queue waiting to be scanned.") - m.data.SetUnit("{segments}") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidSegmentScanPending) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricApachedruidSegmentScanPending) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSegmentScanPending) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentScanPending(cfg MetricConfig) metricApachedruidSegmentScanPending { - m := metricApachedruidSegmentScanPending{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentSize struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.size metric with initial data. -func (m *metricApachedruidSegmentSize) init() { - m.data.SetName("apachedruid.segment.size") - m.data.SetDescription("Total size of used segments in a data source. Emitted only for data sources to which at least one used segment belongs.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSegmentSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentSize) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSegmentSize) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentSize(cfg MetricConfig) metricApachedruidSegmentSize { - m := metricApachedruidSegmentSize{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentSkipCompactBytes struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.skip_compact.bytes metric with initial data. 
-func (m *metricApachedruidSegmentSkipCompactBytes) init() { - m.data.SetName("apachedruid.segment.skip_compact.bytes") - m.data.SetDescription("Total bytes of this datasource that are skipped (not eligible for auto compaction) by the auto compaction.") - m.data.SetUnit("By") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSegmentSkipCompactBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentSkipCompactBytes) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSegmentSkipCompactBytes) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentSkipCompactBytes(cfg MetricConfig) metricApachedruidSegmentSkipCompactBytes { - m := metricApachedruidSegmentSkipCompactBytes{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentSkipCompactCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.skip_compact.count metric with initial data. -func (m *metricApachedruidSegmentSkipCompactCount) init() { - m.data.SetName("apachedruid.segment.skip_compact.count") - m.data.SetDescription("Total number of segments of this datasource that are skipped (not eligible for auto compaction) by the auto compaction.") - m.data.SetUnit("{segments}") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSegmentSkipCompactCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentSkipCompactCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidSegmentSkipCompactCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentSkipCompactCount(cfg MetricConfig) metricApachedruidSegmentSkipCompactCount { - m := metricApachedruidSegmentSkipCompactCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentUnavailableCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.unavailable.count metric with initial data. -func (m *metricApachedruidSegmentUnavailableCount) init() { - m.data.SetName("apachedruid.segment.unavailable.count") - m.data.SetDescription("Number of unique segments left to load until all used segments are available for queries.") - m.data.SetUnit("{segments}") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSegmentUnavailableCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentUnavailableCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSegmentUnavailableCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentUnavailableCount(cfg MetricConfig) metricApachedruidSegmentUnavailableCount { - m := metricApachedruidSegmentUnavailableCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentUnderReplicatedCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.under_replicated.count metric with initial data. 
-func (m *metricApachedruidSegmentUnderReplicatedCount) init() { - m.data.SetName("apachedruid.segment.under_replicated.count") - m.data.SetDescription("Number of segments, including replicas, left to load until all used segments are available for queries.") - m.data.SetUnit("{segments}") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSegmentUnderReplicatedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("tier", segmentTierAttributeValue) - dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentUnderReplicatedCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSegmentUnderReplicatedCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentUnderReplicatedCount(cfg MetricConfig) metricApachedruidSegmentUnderReplicatedCount { - m := metricApachedruidSegmentUnderReplicatedCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentUnneededCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.unneeded.count metric with initial data. -func (m *metricApachedruidSegmentUnneededCount) init() { - m.data.SetName("apachedruid.segment.unneeded.count") - m.data.SetDescription("Number of segments dropped due to being marked as unused.") - m.data.SetUnit("{segments}") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSegmentUnneededCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("tier", segmentTierAttributeValue) - dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentUnneededCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidSegmentUnneededCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentUnneededCount(cfg MetricConfig) metricApachedruidSegmentUnneededCount { - m := metricApachedruidSegmentUnneededCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentUsed struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.used metric with initial data. -func (m *metricApachedruidSegmentUsed) init() { - m.data.SetName("apachedruid.segment.used") - m.data.SetDescription("Bytes used for served segments.") - m.data.SetUnit("By") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSegmentUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentPriorityAttributeValue string, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("priority", segmentPriorityAttributeValue) - dp.Attributes().PutStr("tier", segmentTierAttributeValue) - dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentUsed) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSegmentUsed) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentUsed(cfg MetricConfig) metricApachedruidSegmentUsed { - m := metricApachedruidSegmentUsed{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentUsedPercent struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.used_percent metric with initial data. 
-func (m *metricApachedruidSegmentUsedPercent) init() { - m.data.SetName("apachedruid.segment.used_percent") - m.data.SetDescription("Percentage of space used by served segments.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSegmentUsedPercent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, segmentPriorityAttributeValue string, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetDoubleValue(val) - dp.Attributes().PutStr("priority", segmentPriorityAttributeValue) - dp.Attributes().PutStr("tier", segmentTierAttributeValue) - dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentUsedPercent) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSegmentUsedPercent) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentUsedPercent(cfg MetricConfig) metricApachedruidSegmentUsedPercent { - m := metricApachedruidSegmentUsedPercent{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentWaitCompactBytes struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.wait_compact.bytes metric with initial data. -func (m *metricApachedruidSegmentWaitCompactBytes) init() { - m.data.SetName("apachedruid.segment.wait_compact.bytes") - m.data.SetDescription("Total bytes of this datasource waiting to be compacted by the auto compaction (only consider intervals/segments that are eligible for auto compaction).") - m.data.SetUnit("By") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSegmentWaitCompactBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentWaitCompactBytes) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidSegmentWaitCompactBytes) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentWaitCompactBytes(cfg MetricConfig) metricApachedruidSegmentWaitCompactBytes { - m := metricApachedruidSegmentWaitCompactBytes{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSegmentWaitCompactCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.segment.wait_compact.count metric with initial data. -func (m *metricApachedruidSegmentWaitCompactCount) init() { - m.data.SetName("apachedruid.segment.wait_compact.count") - m.data.SetDescription("Total number of segments of this datasource waiting to be compacted by the auto compaction (only consider intervals/segments that are eligible for auto compaction).") - m.data.SetUnit("{segments}") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSegmentWaitCompactCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("data_source", segmentDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSegmentWaitCompactCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSegmentWaitCompactCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSegmentWaitCompactCount(cfg MetricConfig) metricApachedruidSegmentWaitCompactCount { - m := metricApachedruidSegmentWaitCompactCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidServerviewInitTime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.serverview.init.time metric with initial data. -func (m *metricApachedruidServerviewInitTime) init() { - m.data.SetName("apachedruid.serverview.init.time") - m.data.SetDescription("Time taken to initialize the broker server view. Useful to detect if brokers are taking too long to start.") - m.data.SetUnit("ms") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidServerviewInitTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricApachedruidServerviewInitTime) updateCapacity() {
- if m.data.Gauge().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Gauge().DataPoints().Len()
- }
-}
-
-// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricApachedruidServerviewInitTime) emit(metrics pmetric.MetricSlice) {
- if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
-}
-
-func newMetricApachedruidServerviewInitTime(cfg MetricConfig) metricApachedruidServerviewInitTime {
- m := metricApachedruidServerviewInitTime{config: cfg}
- if cfg.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
-}
-
-type metricApachedruidServerviewSyncHealthy struct {
- data pmetric.Metric // data buffer for generated metric.
- config MetricConfig // metric config provided by user.
- capacity int // max observed number of data points added to the metric.
-}
-
-// init fills apachedruid.serverview.sync.healthy metric with initial data.
-func (m *metricApachedruidServerviewSyncHealthy) init() {
- m.data.SetName("apachedruid.serverview.sync.healthy")
- m.data.SetDescription("Sync status of the Broker with a segment-loading server such as a Historical or Peon. Emitted only when [HTTP-based server view](https://druid.apache.org/docs/latest/configuration#segment-management) is enabled. This metric can be used in conjunction with `serverview/sync/unstableTime` to debug slow startup of Brokers.")
- m.data.SetUnit("1")
- m.data.SetEmptyGauge()
- m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
-}
-
-func (m *metricApachedruidServerviewSyncHealthy) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serverviewTierAttributeValue string, serverviewServerAttributeValue string) {
- if !m.config.Enabled {
- return
- }
- dp := m.data.Gauge().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- dp.Attributes().PutStr("tier", serverviewTierAttributeValue)
- dp.Attributes().PutStr("server", serverviewServerAttributeValue)
-}
-
-// updateCapacity saves max length of data point slices that will be used for the slice capacity.
-func (m *metricApachedruidServerviewSyncHealthy) updateCapacity() {
- if m.data.Gauge().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Gauge().DataPoints().Len()
- }
-}
-
-// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricApachedruidServerviewSyncHealthy) emit(metrics pmetric.MetricSlice) {
- if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
-}
-
-func newMetricApachedruidServerviewSyncHealthy(cfg MetricConfig) metricApachedruidServerviewSyncHealthy {
- m := metricApachedruidServerviewSyncHealthy{config: cfg}
- if cfg.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
-}
-
-type metricApachedruidServerviewSyncUnstableTime struct {
- data pmetric.Metric // data buffer for generated metric.
- config MetricConfig // metric config provided by user.
- capacity int // max observed number of data points added to the metric.
-}
-
-// init fills apachedruid.serverview.sync.unstable_time metric with initial data.
-func (m *metricApachedruidServerviewSyncUnstableTime) init() {
- m.data.SetName("apachedruid.serverview.sync.unstable_time")
- m.data.SetDescription("Time in milliseconds for which the Broker has been failing to sync with a segment-loading server. Emitted only when [HTTP-based server view](https://druid.apache.org/docs/latest/configuration#segment-management) is enabled.")
- m.data.SetUnit("ms")
- m.data.SetEmptyGauge()
- m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
-}
-
-func (m *metricApachedruidServerviewSyncUnstableTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serverviewTierAttributeValue string, serverviewServerAttributeValue string) {
- if !m.config.Enabled {
- return
- }
- dp := m.data.Gauge().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- dp.Attributes().PutStr("tier", serverviewTierAttributeValue)
- dp.Attributes().PutStr("server", serverviewServerAttributeValue)
-}
-
-// updateCapacity saves max length of data point slices that will be used for the slice capacity.
-func (m *metricApachedruidServerviewSyncUnstableTime) updateCapacity() {
- if m.data.Gauge().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Gauge().DataPoints().Len()
- }
-}
-
-// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricApachedruidServerviewSyncUnstableTime) emit(metrics pmetric.MetricSlice) {
- if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
-}
-
-func newMetricApachedruidServerviewSyncUnstableTime(cfg MetricConfig) metricApachedruidServerviewSyncUnstableTime {
- m := metricApachedruidServerviewSyncUnstableTime{config: cfg}
- if cfg.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
-}
-
-type metricApachedruidSQLQueryBytes struct {
- data pmetric.Metric // data buffer for generated metric.
- config MetricConfig // metric config provided by user.
- capacity int // max observed number of data points added to the metric.
-}
-
-// init fills apachedruid.sql_query.bytes metric with initial data.
-func (m *metricApachedruidSQLQueryBytes) init() {
- m.data.SetName("apachedruid.sql_query.bytes")
- m.data.SetDescription("Number of bytes returned in the SQL query response.")
- m.data.SetUnit("By")
- m.data.SetEmptyGauge()
- m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
-}
-
-func (m *metricApachedruidSQLQueryBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sqlQueryDataSourceAttributeValue string, sqlQueryNativeQueryIdsAttributeValue string, sqlQueryEngineAttributeValue string, sqlQueryRemoteAddressAttributeValue string, sqlQueryIDAttributeValue string, sqlQuerySuccessAttributeValue string) {
- if !m.config.Enabled {
- return
- }
- dp := m.data.Gauge().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- dp.Attributes().PutStr("data_source", sqlQueryDataSourceAttributeValue)
- dp.Attributes().PutStr("native_query_ids", sqlQueryNativeQueryIdsAttributeValue)
- dp.Attributes().PutStr("engine", sqlQueryEngineAttributeValue)
- dp.Attributes().PutStr("remote_address", sqlQueryRemoteAddressAttributeValue)
- dp.Attributes().PutStr("id", sqlQueryIDAttributeValue)
- dp.Attributes().PutStr("success", sqlQuerySuccessAttributeValue)
-}
-
-// updateCapacity saves max length of data point slices that will be used for the slice capacity.
-func (m *metricApachedruidSQLQueryBytes) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSQLQueryBytes) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSQLQueryBytes(cfg MetricConfig) metricApachedruidSQLQueryBytes { - m := metricApachedruidSQLQueryBytes{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSQLQueryPlanningTimeMs struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sql_query.planning_time_ms metric with initial data. -func (m *metricApachedruidSQLQueryPlanningTimeMs) init() { - m.data.SetName("apachedruid.sql_query.planning_time_ms") - m.data.SetDescription("Milliseconds taken to plan a SQL to native query.") - m.data.SetUnit("ms") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSQLQueryPlanningTimeMs) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sqlQueryDataSourceAttributeValue string, sqlQueryNativeQueryIdsAttributeValue string, sqlQueryEngineAttributeValue string, sqlQueryRemoteAddressAttributeValue string, sqlQueryIDAttributeValue string, sqlQuerySuccessAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("data_source", sqlQueryDataSourceAttributeValue) - dp.Attributes().PutStr("native_query_ids", sqlQueryNativeQueryIdsAttributeValue) - dp.Attributes().PutStr("engine", sqlQueryEngineAttributeValue) - dp.Attributes().PutStr("remote_address", sqlQueryRemoteAddressAttributeValue) - dp.Attributes().PutStr("id", sqlQueryIDAttributeValue) - dp.Attributes().PutStr("success", sqlQuerySuccessAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSQLQueryPlanningTimeMs) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSQLQueryPlanningTimeMs) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSQLQueryPlanningTimeMs(cfg MetricConfig) metricApachedruidSQLQueryPlanningTimeMs { - m := metricApachedruidSQLQueryPlanningTimeMs{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSQLQueryTime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sql_query.time metric with initial data. 
-func (m *metricApachedruidSQLQueryTime) init() { - m.data.SetName("apachedruid.sql_query.time") - m.data.SetDescription("Milliseconds taken to complete a SQL query.") - m.data.SetUnit("ms") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSQLQueryTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sqlQueryDataSourceAttributeValue string, sqlQueryNativeQueryIdsAttributeValue string, sqlQueryEngineAttributeValue string, sqlQueryRemoteAddressAttributeValue string, sqlQueryIDAttributeValue string, sqlQuerySuccessAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("data_source", sqlQueryDataSourceAttributeValue) - dp.Attributes().PutStr("native_query_ids", sqlQueryNativeQueryIdsAttributeValue) - dp.Attributes().PutStr("engine", sqlQueryEngineAttributeValue) - dp.Attributes().PutStr("remote_address", sqlQueryRemoteAddressAttributeValue) - dp.Attributes().PutStr("id", sqlQueryIDAttributeValue) - dp.Attributes().PutStr("success", sqlQuerySuccessAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSQLQueryTime) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSQLQueryTime) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSQLQueryTime(cfg MetricConfig) metricApachedruidSQLQueryTime { - m := metricApachedruidSQLQueryTime{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSubqueryByteLimitCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.subquery.byte_limit.count metric with initial data. -func (m *metricApachedruidSubqueryByteLimitCount) init() { - m.data.SetName("apachedruid.subquery.byte_limit.count") - m.data.SetDescription("Number of subqueries whose results are materialized as frames (Druid's internal byte representation of rows).") - m.data.SetUnit("{subqueries}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) -} - -func (m *metricApachedruidSubqueryByteLimitCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSubqueryByteLimitCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidSubqueryByteLimitCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSubqueryByteLimitCount(cfg MetricConfig) metricApachedruidSubqueryByteLimitCount { - m := metricApachedruidSubqueryByteLimitCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSubqueryFallbackCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.subquery.fallback.count metric with initial data. -func (m *metricApachedruidSubqueryFallbackCount) init() { - m.data.SetName("apachedruid.subquery.fallback.count") - m.data.SetDescription("Number of subqueries which cannot be materialized as frames.") - m.data.SetUnit("{subqueries}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) -} - -func (m *metricApachedruidSubqueryFallbackCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSubqueryFallbackCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSubqueryFallbackCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSubqueryFallbackCount(cfg MetricConfig) metricApachedruidSubqueryFallbackCount { - m := metricApachedruidSubqueryFallbackCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSubqueryFallbackInsufficientTypeCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.subquery.fallback.insufficient_type.count metric with initial data. 
-func (m *metricApachedruidSubqueryFallbackInsufficientTypeCount) init() {
- m.data.SetName("apachedruid.subquery.fallback.insufficient_type.count")
- m.data.SetDescription("Number of subqueries which cannot be materialized as frames due to insufficient type information in the row signature.")
- m.data.SetUnit("{subqueries}")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta)
-}
-
-func (m *metricApachedruidSubqueryFallbackInsufficientTypeCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
- if !m.config.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
-}
-
-// updateCapacity saves max length of data point slices that will be used for the slice capacity.
-func (m *metricApachedruidSubqueryFallbackInsufficientTypeCount) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
-}
-
-// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricApachedruidSubqueryFallbackInsufficientTypeCount) emit(metrics pmetric.MetricSlice) {
- if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
-}
-
-func newMetricApachedruidSubqueryFallbackInsufficientTypeCount(cfg MetricConfig) metricApachedruidSubqueryFallbackInsufficientTypeCount {
- m := metricApachedruidSubqueryFallbackInsufficientTypeCount{config: cfg}
- if cfg.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
-}
-
-type metricApachedruidSubqueryFallbackUnknownReasonCount struct {
- data pmetric.Metric // data buffer for generated metric.
- config MetricConfig // metric config provided by user.
- capacity int // max observed number of data points added to the metric.
-}
-
-// init fills apachedruid.subquery.fallback.unknown_reason.count metric with initial data.
-func (m *metricApachedruidSubqueryFallbackUnknownReasonCount) init() {
- m.data.SetName("apachedruid.subquery.fallback.unknown_reason.count")
- m.data.SetDescription("Number of subqueries which cannot be materialized as frames due to other reasons.")
- m.data.SetUnit("{subqueries}")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta)
-}
-
-func (m *metricApachedruidSubqueryFallbackUnknownReasonCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
- if !m.config.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
-}
-
-// updateCapacity saves max length of data point slices that will be used for the slice capacity.
-func (m *metricApachedruidSubqueryFallbackUnknownReasonCount) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
-}
-
-// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricApachedruidSubqueryFallbackUnknownReasonCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSubqueryFallbackUnknownReasonCount(cfg MetricConfig) metricApachedruidSubqueryFallbackUnknownReasonCount { - m := metricApachedruidSubqueryFallbackUnknownReasonCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSubqueryRowLimitCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.subquery.row_limit.count metric with initial data. -func (m *metricApachedruidSubqueryRowLimitCount) init() { - m.data.SetName("apachedruid.subquery.row_limit.count") - m.data.SetDescription("Number of subqueries whose results are materialized as rows (Java objects on heap).") - m.data.SetUnit("{subqueries}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) -} - -func (m *metricApachedruidSubqueryRowLimitCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSubqueryRowLimitCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSubqueryRowLimitCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSubqueryRowLimitCount(cfg MetricConfig) metricApachedruidSubqueryRowLimitCount { - m := metricApachedruidSubqueryRowLimitCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysCPU struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.cpu metric with initial data. -func (m *metricApachedruidSysCPU) init() { - m.data.SetName("apachedruid.sys.cpu") - m.data.SetDescription("CPU used.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSysCPU) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysCPUTimeAttributeValue string, sysCPUNameAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("cpu_time", sysCPUTimeAttributeValue) - dp.Attributes().PutStr("cpu_name", sysCPUNameAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricApachedruidSysCPU) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSysCPU) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysCPU(cfg MetricConfig) metricApachedruidSysCPU { - m := metricApachedruidSysCPU{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysDiskQueue struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.disk.queue metric with initial data. -func (m *metricApachedruidSysDiskQueue) init() { - m.data.SetName("apachedruid.sys.disk.queue") - m.data.SetDescription("Disk queue length. Measures number of requests waiting to be processed by disk.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSysDiskQueue) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysDiskNameAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("disk_name", sysDiskNameAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysDiskQueue) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSysDiskQueue) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysDiskQueue(cfg MetricConfig) metricApachedruidSysDiskQueue { - m := metricApachedruidSysDiskQueue{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysDiskReadCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.disk.read.count metric with initial data. 
-func (m *metricApachedruidSysDiskReadCount) init() { - m.data.SetName("apachedruid.sys.disk.read.count") - m.data.SetDescription("Reads from disk.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSysDiskReadCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysDiskNameAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("disk_name", sysDiskNameAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysDiskReadCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSysDiskReadCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysDiskReadCount(cfg MetricConfig) metricApachedruidSysDiskReadCount { - m := metricApachedruidSysDiskReadCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysDiskReadSize struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.disk.read.size metric with initial data. -func (m *metricApachedruidSysDiskReadSize) init() { - m.data.SetName("apachedruid.sys.disk.read.size") - m.data.SetDescription("Bytes read from disk. One indicator of the amount of paging occurring for segments.") - m.data.SetUnit("By") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSysDiskReadSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysDiskNameAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("disk_name", sysDiskNameAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysDiskReadSize) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSysDiskReadSize) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysDiskReadSize(cfg MetricConfig) metricApachedruidSysDiskReadSize { - m := metricApachedruidSysDiskReadSize{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysDiskTransferTime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. 
- capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.disk.transfer_time metric with initial data. -func (m *metricApachedruidSysDiskTransferTime) init() { - m.data.SetName("apachedruid.sys.disk.transfer_time") - m.data.SetDescription("Transfer time to read from or write to disk.") - m.data.SetUnit("ms") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSysDiskTransferTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysDiskNameAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("disk_name", sysDiskNameAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysDiskTransferTime) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSysDiskTransferTime) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysDiskTransferTime(cfg MetricConfig) metricApachedruidSysDiskTransferTime { - m := metricApachedruidSysDiskTransferTime{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysDiskWriteCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.disk.write.count metric with initial data. -func (m *metricApachedruidSysDiskWriteCount) init() { - m.data.SetName("apachedruid.sys.disk.write.count") - m.data.SetDescription("Writes to disk.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSysDiskWriteCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysDiskNameAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("disk_name", sysDiskNameAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysDiskWriteCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidSysDiskWriteCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysDiskWriteCount(cfg MetricConfig) metricApachedruidSysDiskWriteCount { - m := metricApachedruidSysDiskWriteCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysDiskWriteSize struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.disk.write.size metric with initial data. -func (m *metricApachedruidSysDiskWriteSize) init() { - m.data.SetName("apachedruid.sys.disk.write.size") - m.data.SetDescription("Bytes written to disk. One indicator of the amount of paging occurring for segments.") - m.data.SetUnit("By") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSysDiskWriteSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysDiskNameAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("disk_name", sysDiskNameAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysDiskWriteSize) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSysDiskWriteSize) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysDiskWriteSize(cfg MetricConfig) metricApachedruidSysDiskWriteSize { - m := metricApachedruidSysDiskWriteSize{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysFsFilesCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.fs.files.count metric with initial data. -func (m *metricApachedruidSysFsFilesCount) init() { - m.data.SetName("apachedruid.sys.fs.files.count") - m.data.SetDescription("Filesystem total IO nodes.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSysFsFilesCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysFsDirNameAttributeValue string, sysFsDevNameAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("fs_dir_name", sysFsDirNameAttributeValue) - dp.Attributes().PutStr("fs_dev_name", sysFsDevNameAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricApachedruidSysFsFilesCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSysFsFilesCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysFsFilesCount(cfg MetricConfig) metricApachedruidSysFsFilesCount { - m := metricApachedruidSysFsFilesCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysFsFilesFree struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.fs.files.free metric with initial data. -func (m *metricApachedruidSysFsFilesFree) init() { - m.data.SetName("apachedruid.sys.fs.files.free") - m.data.SetDescription("Filesystem free IO nodes.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSysFsFilesFree) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysFsDirNameAttributeValue string, sysFsDevNameAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("fs_dir_name", sysFsDirNameAttributeValue) - dp.Attributes().PutStr("fs_dev_name", sysFsDevNameAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysFsFilesFree) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSysFsFilesFree) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysFsFilesFree(cfg MetricConfig) metricApachedruidSysFsFilesFree { - m := metricApachedruidSysFsFilesFree{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysFsMax struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.fs.max metric with initial data. 
-func (m *metricApachedruidSysFsMax) init() { - m.data.SetName("apachedruid.sys.fs.max") - m.data.SetDescription("Filesystem bytes max.") - m.data.SetUnit("By") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSysFsMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysFsDirNameAttributeValue string, sysFsDevNameAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("fs_dir_name", sysFsDirNameAttributeValue) - dp.Attributes().PutStr("fs_dev_name", sysFsDevNameAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysFsMax) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSysFsMax) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysFsMax(cfg MetricConfig) metricApachedruidSysFsMax { - m := metricApachedruidSysFsMax{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysFsUsed struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.fs.used metric with initial data. -func (m *metricApachedruidSysFsUsed) init() { - m.data.SetName("apachedruid.sys.fs.used") - m.data.SetDescription("Filesystem bytes used.") - m.data.SetUnit("By") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSysFsUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysFsDirNameAttributeValue string, sysFsDevNameAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("fs_dir_name", sysFsDirNameAttributeValue) - dp.Attributes().PutStr("fs_dev_name", sysFsDevNameAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysFsUsed) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSysFsUsed) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysFsUsed(cfg MetricConfig) metricApachedruidSysFsUsed { - m := metricApachedruidSysFsUsed{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysLa1 struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. 
- capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.la.1 metric with initial data. -func (m *metricApachedruidSysLa1) init() { - m.data.SetName("apachedruid.sys.la.1") - m.data.SetDescription("System CPU load averages over past `i` minutes, where `i={1,5,15}`.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidSysLa1) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysLa1) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSysLa1) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysLa1(cfg MetricConfig) metricApachedruidSysLa1 { - m := metricApachedruidSysLa1{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysLa15 struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.la.15 metric with initial data. -func (m *metricApachedruidSysLa15) init() { - m.data.SetName("apachedruid.sys.la.15") - m.data.SetDescription("System CPU load averages over past `i` minutes, where `i={1,5,15}`.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidSysLa15) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysLa15) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSysLa15) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysLa15(cfg MetricConfig) metricApachedruidSysLa15 { - m := metricApachedruidSysLa15{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysLa5 struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.la.5 metric with initial data. 
-func (m *metricApachedruidSysLa5) init() { - m.data.SetName("apachedruid.sys.la.5") - m.data.SetDescription("System CPU load averages over past `i` minutes, where `i={1,5,15}`.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidSysLa5) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysLa5) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSysLa5) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysLa5(cfg MetricConfig) metricApachedruidSysLa5 { - m := metricApachedruidSysLa5{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysMemFree struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.mem.free metric with initial data. -func (m *metricApachedruidSysMemFree) init() { - m.data.SetName("apachedruid.sys.mem.free") - m.data.SetDescription("Memory free.") - m.data.SetUnit("By") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidSysMemFree) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysMemFree) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSysMemFree) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysMemFree(cfg MetricConfig) metricApachedruidSysMemFree { - m := metricApachedruidSysMemFree{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysMemMax struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.mem.max metric with initial data. 
-func (m *metricApachedruidSysMemMax) init() { - m.data.SetName("apachedruid.sys.mem.max") - m.data.SetDescription("Memory max.") - m.data.SetUnit("By") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidSysMemMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysMemMax) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSysMemMax) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysMemMax(cfg MetricConfig) metricApachedruidSysMemMax { - m := metricApachedruidSysMemMax{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysMemUsed struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.mem.used metric with initial data. -func (m *metricApachedruidSysMemUsed) init() { - m.data.SetName("apachedruid.sys.mem.used") - m.data.SetDescription("Memory used.") - m.data.SetUnit("By") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidSysMemUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysMemUsed) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSysMemUsed) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysMemUsed(cfg MetricConfig) metricApachedruidSysMemUsed { - m := metricApachedruidSysMemUsed{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysNetReadDropped struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.net.read.dropped metric with initial data. 
-func (m *metricApachedruidSysNetReadDropped) init() { - m.data.SetName("apachedruid.sys.net.read.dropped") - m.data.SetDescription("Total packets dropped coming from network.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSysNetReadDropped) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysNetHwaddrAttributeValue string, sysNetNameAttributeValue string, sysNetAddressAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("net_hwaddr", sysNetHwaddrAttributeValue) - dp.Attributes().PutStr("net_name", sysNetNameAttributeValue) - dp.Attributes().PutStr("net_address", sysNetAddressAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysNetReadDropped) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSysNetReadDropped) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysNetReadDropped(cfg MetricConfig) metricApachedruidSysNetReadDropped { - m := metricApachedruidSysNetReadDropped{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysNetReadErrors struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.net.read.errors metric with initial data. -func (m *metricApachedruidSysNetReadErrors) init() { - m.data.SetName("apachedruid.sys.net.read.errors") - m.data.SetDescription("Total network read errors.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSysNetReadErrors) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysNetHwaddrAttributeValue string, sysNetNameAttributeValue string, sysNetAddressAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("net_hwaddr", sysNetHwaddrAttributeValue) - dp.Attributes().PutStr("net_name", sysNetNameAttributeValue) - dp.Attributes().PutStr("net_address", sysNetAddressAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysNetReadErrors) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidSysNetReadErrors) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysNetReadErrors(cfg MetricConfig) metricApachedruidSysNetReadErrors { - m := metricApachedruidSysNetReadErrors{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysNetReadPackets struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.net.read.packets metric with initial data. -func (m *metricApachedruidSysNetReadPackets) init() { - m.data.SetName("apachedruid.sys.net.read.packets") - m.data.SetDescription("Total packets read from the network.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSysNetReadPackets) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysNetHwaddrAttributeValue string, sysNetNameAttributeValue string, sysNetAddressAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("net_hwaddr", sysNetHwaddrAttributeValue) - dp.Attributes().PutStr("net_name", sysNetNameAttributeValue) - dp.Attributes().PutStr("net_address", sysNetAddressAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysNetReadPackets) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSysNetReadPackets) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysNetReadPackets(cfg MetricConfig) metricApachedruidSysNetReadPackets { - m := metricApachedruidSysNetReadPackets{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysNetReadSize struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.net.read.size metric with initial data. 
-func (m *metricApachedruidSysNetReadSize) init() { - m.data.SetName("apachedruid.sys.net.read.size") - m.data.SetDescription("Bytes read from the network.") - m.data.SetUnit("By") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSysNetReadSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysNetHwaddrAttributeValue string, sysNetNameAttributeValue string, sysNetAddressAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("net_hwaddr", sysNetHwaddrAttributeValue) - dp.Attributes().PutStr("net_name", sysNetNameAttributeValue) - dp.Attributes().PutStr("net_address", sysNetAddressAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysNetReadSize) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSysNetReadSize) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysNetReadSize(cfg MetricConfig) metricApachedruidSysNetReadSize { - m := metricApachedruidSysNetReadSize{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysNetWriteCollisions struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.net.write.collisions metric with initial data. -func (m *metricApachedruidSysNetWriteCollisions) init() { - m.data.SetName("apachedruid.sys.net.write.collisions") - m.data.SetDescription("Total network write collisions.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSysNetWriteCollisions) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysNetHwaddrAttributeValue string, sysNetNameAttributeValue string, sysNetAddressAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("net_hwaddr", sysNetHwaddrAttributeValue) - dp.Attributes().PutStr("net_name", sysNetNameAttributeValue) - dp.Attributes().PutStr("net_address", sysNetAddressAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysNetWriteCollisions) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidSysNetWriteCollisions) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysNetWriteCollisions(cfg MetricConfig) metricApachedruidSysNetWriteCollisions { - m := metricApachedruidSysNetWriteCollisions{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysNetWriteErrors struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.net.write.errors metric with initial data. -func (m *metricApachedruidSysNetWriteErrors) init() { - m.data.SetName("apachedruid.sys.net.write.errors") - m.data.SetDescription("Total network write errors.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSysNetWriteErrors) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysNetHwaddrAttributeValue string, sysNetNameAttributeValue string, sysNetAddressAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("net_hwaddr", sysNetHwaddrAttributeValue) - dp.Attributes().PutStr("net_name", sysNetNameAttributeValue) - dp.Attributes().PutStr("net_address", sysNetAddressAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysNetWriteErrors) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSysNetWriteErrors) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysNetWriteErrors(cfg MetricConfig) metricApachedruidSysNetWriteErrors { - m := metricApachedruidSysNetWriteErrors{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysNetWritePackets struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.net.write.packets metric with initial data. 
-func (m *metricApachedruidSysNetWritePackets) init() { - m.data.SetName("apachedruid.sys.net.write.packets") - m.data.SetDescription("Total packets written to the network.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSysNetWritePackets) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysNetHwaddrAttributeValue string, sysNetNameAttributeValue string, sysNetAddressAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("net_hwaddr", sysNetHwaddrAttributeValue) - dp.Attributes().PutStr("net_name", sysNetNameAttributeValue) - dp.Attributes().PutStr("net_address", sysNetAddressAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysNetWritePackets) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSysNetWritePackets) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysNetWritePackets(cfg MetricConfig) metricApachedruidSysNetWritePackets { - m := metricApachedruidSysNetWritePackets{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysNetWriteSize struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.net.write.size metric with initial data. -func (m *metricApachedruidSysNetWriteSize) init() { - m.data.SetName("apachedruid.sys.net.write.size") - m.data.SetDescription("Bytes written to the network.") - m.data.SetUnit("By") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSysNetWriteSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysNetHwaddrAttributeValue string, sysNetNameAttributeValue string, sysNetAddressAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("net_hwaddr", sysNetHwaddrAttributeValue) - dp.Attributes().PutStr("net_name", sysNetNameAttributeValue) - dp.Attributes().PutStr("net_address", sysNetAddressAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysNetWriteSize) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidSysNetWriteSize) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysNetWriteSize(cfg MetricConfig) metricApachedruidSysNetWriteSize { - m := metricApachedruidSysNetWriteSize{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysStorageUsed struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.storage.used metric with initial data. -func (m *metricApachedruidSysStorageUsed) init() { - m.data.SetName("apachedruid.sys.storage.used") - m.data.SetDescription("Disk space used.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidSysStorageUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sysFsDirNameAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("fs_dir_name", sysFsDirNameAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysStorageUsed) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSysStorageUsed) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysStorageUsed(cfg MetricConfig) metricApachedruidSysStorageUsed { - m := metricApachedruidSysStorageUsed{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysSwapFree struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.swap.free metric with initial data. -func (m *metricApachedruidSysSwapFree) init() { - m.data.SetName("apachedruid.sys.swap.free") - m.data.SetDescription("Free swap.") - m.data.SetUnit("By") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidSysSwapFree) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysSwapFree) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidSysSwapFree) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysSwapFree(cfg MetricConfig) metricApachedruidSysSwapFree { - m := metricApachedruidSysSwapFree{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysSwapMax struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.swap.max metric with initial data. -func (m *metricApachedruidSysSwapMax) init() { - m.data.SetName("apachedruid.sys.swap.max") - m.data.SetDescription("Max swap.") - m.data.SetUnit("By") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidSysSwapMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysSwapMax) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSysSwapMax) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysSwapMax(cfg MetricConfig) metricApachedruidSysSwapMax { - m := metricApachedruidSysSwapMax{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysSwapPageIn struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.swap.page_in metric with initial data. -func (m *metricApachedruidSysSwapPageIn) init() { - m.data.SetName("apachedruid.sys.swap.page_in") - m.data.SetDescription("Paged in swap.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidSysSwapPageIn) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysSwapPageIn) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidSysSwapPageIn) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysSwapPageIn(cfg MetricConfig) metricApachedruidSysSwapPageIn { - m := metricApachedruidSysSwapPageIn{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysSwapPageOut struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.swap.page_out metric with initial data. -func (m *metricApachedruidSysSwapPageOut) init() { - m.data.SetName("apachedruid.sys.swap.page_out") - m.data.SetDescription("Paged out swap.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidSysSwapPageOut) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysSwapPageOut) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSysSwapPageOut) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysSwapPageOut(cfg MetricConfig) metricApachedruidSysSwapPageOut { - m := metricApachedruidSysSwapPageOut{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysTcpv4ActiveOpens struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.tcpv4.active_opens metric with initial data. -func (m *metricApachedruidSysTcpv4ActiveOpens) init() { - m.data.SetName("apachedruid.sys.tcpv4.active_opens") - m.data.SetDescription("Total TCP active open connections.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidSysTcpv4ActiveOpens) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysTcpv4ActiveOpens) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidSysTcpv4ActiveOpens) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysTcpv4ActiveOpens(cfg MetricConfig) metricApachedruidSysTcpv4ActiveOpens { - m := metricApachedruidSysTcpv4ActiveOpens{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysTcpv4AttemptFails struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.tcpv4.attempt_fails metric with initial data. -func (m *metricApachedruidSysTcpv4AttemptFails) init() { - m.data.SetName("apachedruid.sys.tcpv4.attempt_fails") - m.data.SetDescription("Total TCP active connection failures.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidSysTcpv4AttemptFails) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysTcpv4AttemptFails) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSysTcpv4AttemptFails) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysTcpv4AttemptFails(cfg MetricConfig) metricApachedruidSysTcpv4AttemptFails { - m := metricApachedruidSysTcpv4AttemptFails{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysTcpv4EstabResets struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.tcpv4.estab_resets metric with initial data. -func (m *metricApachedruidSysTcpv4EstabResets) init() { - m.data.SetName("apachedruid.sys.tcpv4.estab_resets") - m.data.SetDescription("Total TCP connection resets.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidSysTcpv4EstabResets) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysTcpv4EstabResets) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidSysTcpv4EstabResets) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysTcpv4EstabResets(cfg MetricConfig) metricApachedruidSysTcpv4EstabResets { - m := metricApachedruidSysTcpv4EstabResets{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysTcpv4InErrs struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.tcpv4.in.errs metric with initial data. -func (m *metricApachedruidSysTcpv4InErrs) init() { - m.data.SetName("apachedruid.sys.tcpv4.in.errs") - m.data.SetDescription("Errors while reading segments.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidSysTcpv4InErrs) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysTcpv4InErrs) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSysTcpv4InErrs) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysTcpv4InErrs(cfg MetricConfig) metricApachedruidSysTcpv4InErrs { - m := metricApachedruidSysTcpv4InErrs{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysTcpv4InSegs struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.tcpv4.in.segs metric with initial data. -func (m *metricApachedruidSysTcpv4InSegs) init() { - m.data.SetName("apachedruid.sys.tcpv4.in.segs") - m.data.SetDescription("Total segments received in connection.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidSysTcpv4InSegs) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysTcpv4InSegs) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidSysTcpv4InSegs) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysTcpv4InSegs(cfg MetricConfig) metricApachedruidSysTcpv4InSegs { - m := metricApachedruidSysTcpv4InSegs{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysTcpv4OutRsts struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.tcpv4.out.rsts metric with initial data. -func (m *metricApachedruidSysTcpv4OutRsts) init() { - m.data.SetName("apachedruid.sys.tcpv4.out.rsts") - m.data.SetDescription("Total `out reset` packets sent to reset the connection.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidSysTcpv4OutRsts) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysTcpv4OutRsts) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSysTcpv4OutRsts) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysTcpv4OutRsts(cfg MetricConfig) metricApachedruidSysTcpv4OutRsts { - m := metricApachedruidSysTcpv4OutRsts{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysTcpv4OutSegs struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.tcpv4.out.segs metric with initial data. -func (m *metricApachedruidSysTcpv4OutSegs) init() { - m.data.SetName("apachedruid.sys.tcpv4.out.segs") - m.data.SetDescription("Total segments sent.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidSysTcpv4OutSegs) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysTcpv4OutSegs) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidSysTcpv4OutSegs) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysTcpv4OutSegs(cfg MetricConfig) metricApachedruidSysTcpv4OutSegs { - m := metricApachedruidSysTcpv4OutSegs{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysTcpv4PassiveOpens struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.tcpv4.passive_opens metric with initial data. -func (m *metricApachedruidSysTcpv4PassiveOpens) init() { - m.data.SetName("apachedruid.sys.tcpv4.passive_opens") - m.data.SetDescription("Total TCP passive open connections.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidSysTcpv4PassiveOpens) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysTcpv4PassiveOpens) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSysTcpv4PassiveOpens) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysTcpv4PassiveOpens(cfg MetricConfig) metricApachedruidSysTcpv4PassiveOpens { - m := metricApachedruidSysTcpv4PassiveOpens{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysTcpv4RetransSegs struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.tcpv4.retrans.segs metric with initial data. -func (m *metricApachedruidSysTcpv4RetransSegs) init() { - m.data.SetName("apachedruid.sys.tcpv4.retrans.segs") - m.data.SetDescription("Total segments re-transmitted.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidSysTcpv4RetransSegs) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysTcpv4RetransSegs) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidSysTcpv4RetransSegs) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysTcpv4RetransSegs(cfg MetricConfig) metricApachedruidSysTcpv4RetransSegs { - m := metricApachedruidSysTcpv4RetransSegs{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidSysUptime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.sys.uptime metric with initial data. -func (m *metricApachedruidSysUptime) init() { - m.data.SetName("apachedruid.sys.uptime") - m.data.SetDescription("Total system uptime.") - m.data.SetUnit("s") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidSysUptime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidSysUptime) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidSysUptime) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidSysUptime(cfg MetricConfig) metricApachedruidSysUptime { - m := metricApachedruidSysUptime{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidTaskActionBatchAttempts struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.task.action.batch.attempts metric with initial data. -func (m *metricApachedruidTaskActionBatchAttempts) init() { - m.data.SetName("apachedruid.task.action.batch.attempts") - m.data.SetDescription("Number of execution attempts for a single batch of task actions. 
Currently only being emitted for [batched `segmentAllocate` actions](https,//druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions).") - m.data.SetUnit("{attempts}") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidTaskActionBatchAttempts) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskIntervalAttributeValue string, taskDataSourceAttributeValue string, taskActionTypeAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("interval", taskIntervalAttributeValue) - dp.Attributes().PutStr("data_source", taskDataSourceAttributeValue) - dp.Attributes().PutStr("task_action_type", taskActionTypeAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidTaskActionBatchAttempts) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidTaskActionBatchAttempts) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidTaskActionBatchAttempts(cfg MetricConfig) metricApachedruidTaskActionBatchAttempts { - m := metricApachedruidTaskActionBatchAttempts{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidTaskActionBatchQueueTime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.task.action.batch.queue_time metric with initial data. -func (m *metricApachedruidTaskActionBatchQueueTime) init() { - m.data.SetName("apachedruid.task.action.batch.queue_time") - m.data.SetDescription("Milliseconds spent by a batch of task actions in queue. Currently only being emitted for [batched `segmentAllocate` actions](https,//druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions).") - m.data.SetUnit("ms") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidTaskActionBatchQueueTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskIntervalAttributeValue string, taskDataSourceAttributeValue string, taskActionTypeAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("interval", taskIntervalAttributeValue) - dp.Attributes().PutStr("data_source", taskDataSourceAttributeValue) - dp.Attributes().PutStr("task_action_type", taskActionTypeAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricApachedruidTaskActionBatchQueueTime) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidTaskActionBatchQueueTime) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidTaskActionBatchQueueTime(cfg MetricConfig) metricApachedruidTaskActionBatchQueueTime { - m := metricApachedruidTaskActionBatchQueueTime{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidTaskActionBatchRunTime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.task.action.batch.run_time metric with initial data. -func (m *metricApachedruidTaskActionBatchRunTime) init() { - m.data.SetName("apachedruid.task.action.batch.run_time") - m.data.SetDescription("Milliseconds taken to execute a batch of task actions. Currently only being emitted for [batched `segmentAllocate` actions](https,//druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions).") - m.data.SetUnit("ms") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidTaskActionBatchRunTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskIntervalAttributeValue string, taskDataSourceAttributeValue string, taskActionTypeAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("interval", taskIntervalAttributeValue) - dp.Attributes().PutStr("data_source", taskDataSourceAttributeValue) - dp.Attributes().PutStr("task_action_type", taskActionTypeAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidTaskActionBatchRunTime) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidTaskActionBatchRunTime) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidTaskActionBatchRunTime(cfg MetricConfig) metricApachedruidTaskActionBatchRunTime { - m := metricApachedruidTaskActionBatchRunTime{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidTaskActionBatchSize struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.task.action.batch.size metric with initial data. 
-func (m *metricApachedruidTaskActionBatchSize) init() { - m.data.SetName("apachedruid.task.action.batch.size") - m.data.SetDescription("Number of task actions in a batch that was executed during the emission period. Currently only being emitted for [batched `segmentAllocate` actions](https,//druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions).") - m.data.SetUnit("{actions}") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidTaskActionBatchSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskIntervalAttributeValue string, taskDataSourceAttributeValue string, taskActionTypeAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("interval", taskIntervalAttributeValue) - dp.Attributes().PutStr("data_source", taskDataSourceAttributeValue) - dp.Attributes().PutStr("task_action_type", taskActionTypeAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidTaskActionBatchSize) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidTaskActionBatchSize) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidTaskActionBatchSize(cfg MetricConfig) metricApachedruidTaskActionBatchSize { - m := metricApachedruidTaskActionBatchSize{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidTaskActionFailedCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.task.action.failed.count metric with initial data. -func (m *metricApachedruidTaskActionFailedCount) init() { - m.data.SetName("apachedruid.task.action.failed.count") - m.data.SetDescription("Number of task actions that failed during the emission period. 
Currently only being emitted for [batched `segmentAllocate` actions](https,//druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions).") - m.data.SetUnit("{actions}") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidTaskActionFailedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskTypeAttributeValue string, taskDataSourceAttributeValue string, taskActionTypeAttributeValue string, taskGroupIDAttributeValue string, taskTagsAttributeValue string, taskIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", taskTypeAttributeValue) - dp.Attributes().PutStr("data_source", taskDataSourceAttributeValue) - dp.Attributes().PutStr("task_action_type", taskActionTypeAttributeValue) - dp.Attributes().PutStr("group_id", taskGroupIDAttributeValue) - dp.Attributes().PutStr("tags", taskTagsAttributeValue) - dp.Attributes().PutStr("task_id", taskIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidTaskActionFailedCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidTaskActionFailedCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidTaskActionFailedCount(cfg MetricConfig) metricApachedruidTaskActionFailedCount { - m := metricApachedruidTaskActionFailedCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidTaskActionLogTime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.task.action.log.time metric with initial data. 
-func (m *metricApachedruidTaskActionLogTime) init() { - m.data.SetName("apachedruid.task.action.log.time") - m.data.SetDescription("Milliseconds taken to log a task action to the audit log.") - m.data.SetUnit("ms") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidTaskActionLogTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskTypeAttributeValue string, taskDataSourceAttributeValue string, taskActionTypeAttributeValue string, taskGroupIDAttributeValue string, taskTagsAttributeValue string, taskIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", taskTypeAttributeValue) - dp.Attributes().PutStr("data_source", taskDataSourceAttributeValue) - dp.Attributes().PutStr("task_action_type", taskActionTypeAttributeValue) - dp.Attributes().PutStr("group_id", taskGroupIDAttributeValue) - dp.Attributes().PutStr("tags", taskTagsAttributeValue) - dp.Attributes().PutStr("task_id", taskIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidTaskActionLogTime) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidTaskActionLogTime) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidTaskActionLogTime(cfg MetricConfig) metricApachedruidTaskActionLogTime { - m := metricApachedruidTaskActionLogTime{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidTaskActionRunTime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.task.action.run.time metric with initial data. 
-func (m *metricApachedruidTaskActionRunTime) init() { - m.data.SetName("apachedruid.task.action.run.time") - m.data.SetDescription("Milliseconds taken to execute a task action.") - m.data.SetUnit("ms") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidTaskActionRunTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskTypeAttributeValue string, taskDataSourceAttributeValue string, taskActionTypeAttributeValue string, taskGroupIDAttributeValue string, taskTagsAttributeValue string, taskIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", taskTypeAttributeValue) - dp.Attributes().PutStr("data_source", taskDataSourceAttributeValue) - dp.Attributes().PutStr("task_action_type", taskActionTypeAttributeValue) - dp.Attributes().PutStr("group_id", taskGroupIDAttributeValue) - dp.Attributes().PutStr("tags", taskTagsAttributeValue) - dp.Attributes().PutStr("task_id", taskIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidTaskActionRunTime) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidTaskActionRunTime) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidTaskActionRunTime(cfg MetricConfig) metricApachedruidTaskActionRunTime { - m := metricApachedruidTaskActionRunTime{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidTaskActionSuccessCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.task.action.success.count metric with initial data. -func (m *metricApachedruidTaskActionSuccessCount) init() { - m.data.SetName("apachedruid.task.action.success.count") - m.data.SetDescription("Number of task actions that were executed successfully during the emission period. 
Currently only being emitted for [batched `segmentAllocate` actions](https,//druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions).") - m.data.SetUnit("{actions}") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidTaskActionSuccessCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskTypeAttributeValue string, taskDataSourceAttributeValue string, taskActionTypeAttributeValue string, taskGroupIDAttributeValue string, taskTagsAttributeValue string, taskIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", taskTypeAttributeValue) - dp.Attributes().PutStr("data_source", taskDataSourceAttributeValue) - dp.Attributes().PutStr("task_action_type", taskActionTypeAttributeValue) - dp.Attributes().PutStr("group_id", taskGroupIDAttributeValue) - dp.Attributes().PutStr("tags", taskTagsAttributeValue) - dp.Attributes().PutStr("task_id", taskIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidTaskActionSuccessCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidTaskActionSuccessCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidTaskActionSuccessCount(cfg MetricConfig) metricApachedruidTaskActionSuccessCount { - m := metricApachedruidTaskActionSuccessCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidTaskFailedCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.task.failed.count metric with initial data. -func (m *metricApachedruidTaskFailedCount) init() { - m.data.SetName("apachedruid.task.failed.count") - m.data.SetDescription("Number of failed tasks per emission period. This metric is only available if the `TaskCountStatsMonitor` module is included.") - m.data.SetUnit("{tasks}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidTaskFailedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("data_source", taskDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricApachedruidTaskFailedCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidTaskFailedCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidTaskFailedCount(cfg MetricConfig) metricApachedruidTaskFailedCount { - m := metricApachedruidTaskFailedCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidTaskPendingCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.task.pending.count metric with initial data. -func (m *metricApachedruidTaskPendingCount) init() { - m.data.SetName("apachedruid.task.pending.count") - m.data.SetDescription("Number of current pending tasks. This metric is only available if the `TaskCountStatsMonitor` module is included.") - m.data.SetUnit("{tasks}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidTaskPendingCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("data_source", taskDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidTaskPendingCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidTaskPendingCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidTaskPendingCount(cfg MetricConfig) metricApachedruidTaskPendingCount { - m := metricApachedruidTaskPendingCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidTaskPendingTime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.task.pending.time metric with initial data. 
-func (m *metricApachedruidTaskPendingTime) init() { - m.data.SetName("apachedruid.task.pending.time") - m.data.SetDescription("Milliseconds taken for a task to wait for running.") - m.data.SetUnit("ms") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidTaskPendingTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskTypeAttributeValue string, taskDataSourceAttributeValue string, taskGroupIDAttributeValue string, taskTagsAttributeValue string, taskIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", taskTypeAttributeValue) - dp.Attributes().PutStr("data_source", taskDataSourceAttributeValue) - dp.Attributes().PutStr("group_id", taskGroupIDAttributeValue) - dp.Attributes().PutStr("tags", taskTagsAttributeValue) - dp.Attributes().PutStr("task_id", taskIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidTaskPendingTime) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidTaskPendingTime) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidTaskPendingTime(cfg MetricConfig) metricApachedruidTaskPendingTime { - m := metricApachedruidTaskPendingTime{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidTaskRunTime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.task.run.time metric with initial data. -func (m *metricApachedruidTaskRunTime) init() { - m.data.SetName("apachedruid.task.run.time") - m.data.SetDescription("Milliseconds taken to run a task.") - m.data.SetUnit("ms") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidTaskRunTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskTypeAttributeValue string, taskDataSourceAttributeValue string, taskGroupIDAttributeValue string, taskStatusAttributeValue string, taskTagsAttributeValue string, taskIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", taskTypeAttributeValue) - dp.Attributes().PutStr("data_source", taskDataSourceAttributeValue) - dp.Attributes().PutStr("group_id", taskGroupIDAttributeValue) - dp.Attributes().PutStr("task_status", taskStatusAttributeValue) - dp.Attributes().PutStr("tags", taskTagsAttributeValue) - dp.Attributes().PutStr("task_id", taskIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricApachedruidTaskRunTime) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidTaskRunTime) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidTaskRunTime(cfg MetricConfig) metricApachedruidTaskRunTime { - m := metricApachedruidTaskRunTime{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidTaskRunningCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.task.running.count metric with initial data. -func (m *metricApachedruidTaskRunningCount) init() { - m.data.SetName("apachedruid.task.running.count") - m.data.SetDescription("Number of current running tasks. This metric is only available if the `TaskCountStatsMonitor` module is included.") - m.data.SetUnit("{tasks}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidTaskRunningCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("data_source", taskDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidTaskRunningCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidTaskRunningCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidTaskRunningCount(cfg MetricConfig) metricApachedruidTaskRunningCount { - m := metricApachedruidTaskRunningCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidTaskSegmentAvailabilityWaitTime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.task.segment_availability.wait.time metric with initial data. 
-func (m *metricApachedruidTaskSegmentAvailabilityWaitTime) init() { - m.data.SetName("apachedruid.task.segment_availability.wait.time") - m.data.SetDescription("The amount of milliseconds a batch indexing task waited for newly created segments to become available for querying.") - m.data.SetUnit("ms") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidTaskSegmentAvailabilityWaitTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskTypeAttributeValue string, taskDataSourceAttributeValue string, taskGroupIDAttributeValue string, taskSegmentAvailabilityConfirmedAttributeValue string, taskTagsAttributeValue string, taskIDAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("task_type", taskTypeAttributeValue) - dp.Attributes().PutStr("data_source", taskDataSourceAttributeValue) - dp.Attributes().PutStr("group_id", taskGroupIDAttributeValue) - dp.Attributes().PutStr("segment_availability_confirmed", taskSegmentAvailabilityConfirmedAttributeValue) - dp.Attributes().PutStr("tags", taskTagsAttributeValue) - dp.Attributes().PutStr("task_id", taskIDAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidTaskSegmentAvailabilityWaitTime) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidTaskSegmentAvailabilityWaitTime) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidTaskSegmentAvailabilityWaitTime(cfg MetricConfig) metricApachedruidTaskSegmentAvailabilityWaitTime { - m := metricApachedruidTaskSegmentAvailabilityWaitTime{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidTaskSuccessCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.task.success.count metric with initial data. -func (m *metricApachedruidTaskSuccessCount) init() { - m.data.SetName("apachedruid.task.success.count") - m.data.SetDescription("Number of successful tasks per emission period. This metric is only available if the `TaskCountStatsMonitor` module is included.") - m.data.SetUnit("{tasks}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidTaskSuccessCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("data_source", taskDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricApachedruidTaskSuccessCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidTaskSuccessCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidTaskSuccessCount(cfg MetricConfig) metricApachedruidTaskSuccessCount { - m := metricApachedruidTaskSuccessCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidTaskWaitingCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.task.waiting.count metric with initial data. -func (m *metricApachedruidTaskWaitingCount) init() { - m.data.SetName("apachedruid.task.waiting.count") - m.data.SetDescription("Number of current waiting tasks. This metric is only available if the `TaskCountStatsMonitor` module is included.") - m.data.SetUnit("{tasks}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidTaskWaitingCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskDataSourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("data_source", taskDataSourceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidTaskWaitingCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidTaskWaitingCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidTaskWaitingCount(cfg MetricConfig) metricApachedruidTaskWaitingCount { - m := metricApachedruidTaskWaitingCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidTaskSlotBlacklistedCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.task_slot.blacklisted.count metric with initial data. -func (m *metricApachedruidTaskSlotBlacklistedCount) init() { - m.data.SetName("apachedruid.task_slot.blacklisted.count") - m.data.SetDescription("Number of total task slots in blacklisted Middle Managers and Indexers per emission period. 
This metric is only available if the `TaskSlotCountStatsMonitor` module is included.") - m.data.SetUnit("{slots}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidTaskSlotBlacklistedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskSlotCategoryAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("category", taskSlotCategoryAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidTaskSlotBlacklistedCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidTaskSlotBlacklistedCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidTaskSlotBlacklistedCount(cfg MetricConfig) metricApachedruidTaskSlotBlacklistedCount { - m := metricApachedruidTaskSlotBlacklistedCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidTaskSlotIdleCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.task_slot.idle.count metric with initial data. -func (m *metricApachedruidTaskSlotIdleCount) init() { - m.data.SetName("apachedruid.task_slot.idle.count") - m.data.SetDescription("Number of idle task slots per emission period. This metric is only available if the `TaskSlotCountStatsMonitor` module is included.") - m.data.SetUnit("{slots}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidTaskSlotIdleCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskSlotCategoryAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("category", taskSlotCategoryAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidTaskSlotIdleCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidTaskSlotIdleCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidTaskSlotIdleCount(cfg MetricConfig) metricApachedruidTaskSlotIdleCount { - m := metricApachedruidTaskSlotIdleCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidTaskSlotLazyCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.task_slot.lazy.count metric with initial data. -func (m *metricApachedruidTaskSlotLazyCount) init() { - m.data.SetName("apachedruid.task_slot.lazy.count") - m.data.SetDescription("Number of total task slots in lazy marked Middle Managers and Indexers per emission period. This metric is only available if the `TaskSlotCountStatsMonitor` module is included.") - m.data.SetUnit("{slots}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidTaskSlotLazyCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskSlotCategoryAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("category", taskSlotCategoryAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidTaskSlotLazyCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidTaskSlotLazyCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidTaskSlotLazyCount(cfg MetricConfig) metricApachedruidTaskSlotLazyCount { - m := metricApachedruidTaskSlotLazyCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidTaskSlotTotalCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.task_slot.total.count metric with initial data. -func (m *metricApachedruidTaskSlotTotalCount) init() { - m.data.SetName("apachedruid.task_slot.total.count") - m.data.SetDescription("Number of total task slots per emission period. 
This metric is only available if the `TaskSlotCountStatsMonitor` module is included.") - m.data.SetUnit("{slots}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidTaskSlotTotalCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskSlotCategoryAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("category", taskSlotCategoryAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidTaskSlotTotalCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidTaskSlotTotalCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidTaskSlotTotalCount(cfg MetricConfig) metricApachedruidTaskSlotTotalCount { - m := metricApachedruidTaskSlotTotalCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidTaskSlotUsedCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.task_slot.used.count metric with initial data. -func (m *metricApachedruidTaskSlotUsedCount) init() { - m.data.SetName("apachedruid.task_slot.used.count") - m.data.SetDescription("Number of busy task slots per emission period. This metric is only available if the `TaskSlotCountStatsMonitor` module is included.") - m.data.SetUnit("{slots}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidTaskSlotUsedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, taskSlotCategoryAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("category", taskSlotCategoryAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidTaskSlotUsedCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidTaskSlotUsedCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidTaskSlotUsedCount(cfg MetricConfig) metricApachedruidTaskSlotUsedCount { - m := metricApachedruidTaskSlotUsedCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidTierHistoricalCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.tier.historical.count metric with initial data. -func (m *metricApachedruidTierHistoricalCount) init() { - m.data.SetName("apachedruid.tier.historical.count") - m.data.SetDescription("Number of available historical nodes in each tier.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidTierHistoricalCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, tierAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("tier", tierAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidTierHistoricalCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidTierHistoricalCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidTierHistoricalCount(cfg MetricConfig) metricApachedruidTierHistoricalCount { - m := metricApachedruidTierHistoricalCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidTierReplicationFactor struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.tier.replication.factor metric with initial data. -func (m *metricApachedruidTierReplicationFactor) init() { - m.data.SetName("apachedruid.tier.replication.factor") - m.data.SetDescription("Configured maximum replication factor in each tier.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidTierReplicationFactor) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, tierAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("tier", tierAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricApachedruidTierReplicationFactor) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidTierReplicationFactor) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidTierReplicationFactor(cfg MetricConfig) metricApachedruidTierReplicationFactor { - m := metricApachedruidTierReplicationFactor{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidTierRequiredCapacity struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.tier.required.capacity metric with initial data. -func (m *metricApachedruidTierRequiredCapacity) init() { - m.data.SetName("apachedruid.tier.required.capacity") - m.data.SetDescription("Total capacity in bytes required in each tier.") - m.data.SetUnit("By") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidTierRequiredCapacity) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, tierAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("tier", tierAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidTierRequiredCapacity) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidTierRequiredCapacity) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidTierRequiredCapacity(cfg MetricConfig) metricApachedruidTierRequiredCapacity { - m := metricApachedruidTierRequiredCapacity{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidTierTotalCapacity struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.tier.total.capacity metric with initial data. 
-func (m *metricApachedruidTierTotalCapacity) init() { - m.data.SetName("apachedruid.tier.total.capacity") - m.data.SetDescription("Total capacity in bytes available in each tier.") - m.data.SetUnit("By") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidTierTotalCapacity) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, tierAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("tier", tierAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidTierTotalCapacity) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidTierTotalCapacity) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidTierTotalCapacity(cfg MetricConfig) metricApachedruidTierTotalCapacity { - m := metricApachedruidTierTotalCapacity{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidWorkerTaskFailedCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.worker.task.failed.count metric with initial data. -func (m *metricApachedruidWorkerTaskFailedCount) init() { - m.data.SetName("apachedruid.worker.task.failed.count") - m.data.SetDescription("Number of failed tasks run on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included, and is only supported for Middle Manager nodes.") - m.data.SetUnit("{tasks}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidWorkerTaskFailedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, workerCategoryAttributeValue string, workerVersionAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("category", workerCategoryAttributeValue) - dp.Attributes().PutStr("worker_version", workerVersionAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidWorkerTaskFailedCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidWorkerTaskFailedCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidWorkerTaskFailedCount(cfg MetricConfig) metricApachedruidWorkerTaskFailedCount { - m := metricApachedruidWorkerTaskFailedCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidWorkerTaskSuccessCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.worker.task.success.count metric with initial data. -func (m *metricApachedruidWorkerTaskSuccessCount) init() { - m.data.SetName("apachedruid.worker.task.success.count") - m.data.SetDescription("Number of successful tasks run on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included, and is only supported for Middle Manager nodes.") - m.data.SetUnit("{tasks}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidWorkerTaskSuccessCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, workerCategoryAttributeValue string, workerVersionAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("category", workerCategoryAttributeValue) - dp.Attributes().PutStr("worker_version", workerVersionAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidWorkerTaskSuccessCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidWorkerTaskSuccessCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidWorkerTaskSuccessCount(cfg MetricConfig) metricApachedruidWorkerTaskSuccessCount { - m := metricApachedruidWorkerTaskSuccessCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidWorkerTaskSlotIdleCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.worker.task_slot.idle.count metric with initial data. -func (m *metricApachedruidWorkerTaskSlotIdleCount) init() { - m.data.SetName("apachedruid.worker.task_slot.idle.count") - m.data.SetDescription("Number of idle task slots on the reporting worker per emission period. 
This metric is only available if the `WorkerTaskCountStatsMonitor` module is included, and is only supported for Middle Manager nodes.") - m.data.SetUnit("{slots}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidWorkerTaskSlotIdleCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, workerCategoryAttributeValue string, workerVersionAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("category", workerCategoryAttributeValue) - dp.Attributes().PutStr("worker_version", workerVersionAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidWorkerTaskSlotIdleCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidWorkerTaskSlotIdleCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidWorkerTaskSlotIdleCount(cfg MetricConfig) metricApachedruidWorkerTaskSlotIdleCount { - m := metricApachedruidWorkerTaskSlotIdleCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidWorkerTaskSlotTotalCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.worker.task_slot.total.count metric with initial data. -func (m *metricApachedruidWorkerTaskSlotTotalCount) init() { - m.data.SetName("apachedruid.worker.task_slot.total.count") - m.data.SetDescription("Number of total task slots on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included.") - m.data.SetUnit("{slots}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidWorkerTaskSlotTotalCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, workerCategoryAttributeValue string, workerVersionAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("category", workerCategoryAttributeValue) - dp.Attributes().PutStr("worker_version", workerVersionAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidWorkerTaskSlotTotalCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricApachedruidWorkerTaskSlotTotalCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidWorkerTaskSlotTotalCount(cfg MetricConfig) metricApachedruidWorkerTaskSlotTotalCount { - m := metricApachedruidWorkerTaskSlotTotalCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidWorkerTaskSlotUsedCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.worker.task_slot.used.count metric with initial data. -func (m *metricApachedruidWorkerTaskSlotUsedCount) init() { - m.data.SetName("apachedruid.worker.task_slot.used.count") - m.data.SetDescription("Number of busy task slots on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included.") - m.data.SetUnit("{slots}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricApachedruidWorkerTaskSlotUsedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, workerCategoryAttributeValue string, workerVersionAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("category", workerCategoryAttributeValue) - dp.Attributes().PutStr("worker_version", workerVersionAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidWorkerTaskSlotUsedCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidWorkerTaskSlotUsedCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidWorkerTaskSlotUsedCount(cfg MetricConfig) metricApachedruidWorkerTaskSlotUsedCount { - m := metricApachedruidWorkerTaskSlotUsedCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidZkConnected struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.zk.connected metric with initial data. -func (m *metricApachedruidZkConnected) init() { - m.data.SetName("apachedruid.zk.connected") - m.data.SetDescription("Indicator of connection status. `1` for connected, `0` for disconnected. 
Emitted once per monitor period.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidZkConnected) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidZkConnected) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidZkConnected) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidZkConnected(cfg MetricConfig) metricApachedruidZkConnected { - m := metricApachedruidZkConnected{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricApachedruidZkReconnectTime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills apachedruid.zk.reconnect.time metric with initial data. -func (m *metricApachedruidZkReconnectTime) init() { - m.data.SetName("apachedruid.zk.reconnect.time") - m.data.SetDescription("Amount of time, in milliseconds, that a server was disconnected from ZooKeeper before reconnecting. Emitted on reconnection. Not emitted if connection to ZooKeeper is permanently lost, because in this case, there is no reconnection.") - m.data.SetUnit("ms") - m.data.SetEmptyGauge() -} - -func (m *metricApachedruidZkReconnectTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricApachedruidZkReconnectTime) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApachedruidZkReconnectTime) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricApachedruidZkReconnectTime(cfg MetricConfig) metricApachedruidZkReconnectTime { - m := metricApachedruidZkReconnectTime{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations -// required to produce metric representation defined in metadata and user config. -type MetricsBuilder struct { - config MetricsBuilderConfig // config of the metrics builder. - startTime pcommon.Timestamp // start time that will be applied to all recorded data points. - metricsCapacity int // maximum observed number of metrics per resource. 
- metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. - buildInfo component.BuildInfo // contains version information. - metricApachedruidCompactSegmentAnalyzerFetchAndProcessMillis metricApachedruidCompactSegmentAnalyzerFetchAndProcessMillis - metricApachedruidCompactTaskCount metricApachedruidCompactTaskCount - metricApachedruidCompactTaskAvailableSlotCount metricApachedruidCompactTaskAvailableSlotCount - metricApachedruidCompactTaskMaxSlotCount metricApachedruidCompactTaskMaxSlotCount - metricApachedruidCoordinatorGlobalTime metricApachedruidCoordinatorGlobalTime - metricApachedruidCoordinatorTime metricApachedruidCoordinatorTime - metricApachedruidIngestBytesReceived metricApachedruidIngestBytesReceived - metricApachedruidIngestCount metricApachedruidIngestCount - metricApachedruidIngestEventsBuffered metricApachedruidIngestEventsBuffered - metricApachedruidIngestEventsDuplicate metricApachedruidIngestEventsDuplicate - metricApachedruidIngestEventsMessageGap metricApachedruidIngestEventsMessageGap - metricApachedruidIngestEventsProcessed metricApachedruidIngestEventsProcessed - metricApachedruidIngestEventsProcessedWithError metricApachedruidIngestEventsProcessedWithError - metricApachedruidIngestEventsThrownAway metricApachedruidIngestEventsThrownAway - metricApachedruidIngestEventsUnparseable metricApachedruidIngestEventsUnparseable - metricApachedruidIngestHandoffCount metricApachedruidIngestHandoffCount - metricApachedruidIngestHandoffFailed metricApachedruidIngestHandoffFailed - metricApachedruidIngestHandoffTime metricApachedruidIngestHandoffTime - metricApachedruidIngestInputBytes metricApachedruidIngestInputBytes - metricApachedruidIngestKafkaAvgLag metricApachedruidIngestKafkaAvgLag - metricApachedruidIngestKafkaLag metricApachedruidIngestKafkaLag - metricApachedruidIngestKafkaMaxLag metricApachedruidIngestKafkaMaxLag - metricApachedruidIngestKafkaPartitionLag metricApachedruidIngestKafkaPartitionLag - metricApachedruidIngestKinesisAvgLagTime metricApachedruidIngestKinesisAvgLagTime - metricApachedruidIngestKinesisLagTime metricApachedruidIngestKinesisLagTime - metricApachedruidIngestKinesisMaxLagTime metricApachedruidIngestKinesisMaxLagTime - metricApachedruidIngestKinesisPartitionLagTime metricApachedruidIngestKinesisPartitionLagTime - metricApachedruidIngestMergeCPU metricApachedruidIngestMergeCPU - metricApachedruidIngestMergeTime metricApachedruidIngestMergeTime - metricApachedruidIngestNoticesQueueSize metricApachedruidIngestNoticesQueueSize - metricApachedruidIngestNoticesTime metricApachedruidIngestNoticesTime - metricApachedruidIngestPauseTime metricApachedruidIngestPauseTime - metricApachedruidIngestPersistsBackPressure metricApachedruidIngestPersistsBackPressure - metricApachedruidIngestPersistsCount metricApachedruidIngestPersistsCount - metricApachedruidIngestPersistsCPU metricApachedruidIngestPersistsCPU - metricApachedruidIngestPersistsFailed metricApachedruidIngestPersistsFailed - metricApachedruidIngestPersistsTime metricApachedruidIngestPersistsTime - metricApachedruidIngestRowsOutput metricApachedruidIngestRowsOutput - metricApachedruidIngestSegmentsCount metricApachedruidIngestSegmentsCount - metricApachedruidIngestShuffleBytes metricApachedruidIngestShuffleBytes - metricApachedruidIngestShuffleRequests metricApachedruidIngestShuffleRequests - metricApachedruidIngestSinkCount metricApachedruidIngestSinkCount - metricApachedruidIngestTombstonesCount metricApachedruidIngestTombstonesCount - metricApachedruidIntervalCompactedCount 
metricApachedruidIntervalCompactedCount - metricApachedruidIntervalSkipCompactCount metricApachedruidIntervalSkipCompactCount - metricApachedruidIntervalWaitCompactCount metricApachedruidIntervalWaitCompactCount - metricApachedruidJettyNumOpenConnections metricApachedruidJettyNumOpenConnections - metricApachedruidJettyThreadPoolBusy metricApachedruidJettyThreadPoolBusy - metricApachedruidJettyThreadPoolIdle metricApachedruidJettyThreadPoolIdle - metricApachedruidJettyThreadPoolIsLowOnThreads metricApachedruidJettyThreadPoolIsLowOnThreads - metricApachedruidJettyThreadPoolMax metricApachedruidJettyThreadPoolMax - metricApachedruidJettyThreadPoolMin metricApachedruidJettyThreadPoolMin - metricApachedruidJettyThreadPoolQueueSize metricApachedruidJettyThreadPoolQueueSize - metricApachedruidJettyThreadPoolTotal metricApachedruidJettyThreadPoolTotal - metricApachedruidJvmBufferpoolCapacity metricApachedruidJvmBufferpoolCapacity - metricApachedruidJvmBufferpoolCount metricApachedruidJvmBufferpoolCount - metricApachedruidJvmBufferpoolUsed metricApachedruidJvmBufferpoolUsed - metricApachedruidJvmGcCount metricApachedruidJvmGcCount - metricApachedruidJvmGcCPU metricApachedruidJvmGcCPU - metricApachedruidJvmMemCommitted metricApachedruidJvmMemCommitted - metricApachedruidJvmMemInit metricApachedruidJvmMemInit - metricApachedruidJvmMemMax metricApachedruidJvmMemMax - metricApachedruidJvmMemUsed metricApachedruidJvmMemUsed - metricApachedruidJvmPoolCommitted metricApachedruidJvmPoolCommitted - metricApachedruidJvmPoolInit metricApachedruidJvmPoolInit - metricApachedruidJvmPoolMax metricApachedruidJvmPoolMax - metricApachedruidJvmPoolUsed metricApachedruidJvmPoolUsed - metricApachedruidKillPendingSegmentsCount metricApachedruidKillPendingSegmentsCount - metricApachedruidKillTaskCount metricApachedruidKillTaskCount - metricApachedruidKillTaskAvailableSlotCount metricApachedruidKillTaskAvailableSlotCount - metricApachedruidKillTaskMaxSlotCount metricApachedruidKillTaskMaxSlotCount - metricApachedruidMergeBufferPendingRequests metricApachedruidMergeBufferPendingRequests - metricApachedruidMetadataKillAuditCount metricApachedruidMetadataKillAuditCount - metricApachedruidMetadataKillCompactionCount metricApachedruidMetadataKillCompactionCount - metricApachedruidMetadataKillDatasourceCount metricApachedruidMetadataKillDatasourceCount - metricApachedruidMetadataKillRuleCount metricApachedruidMetadataKillRuleCount - metricApachedruidMetadataKillSupervisorCount metricApachedruidMetadataKillSupervisorCount - metricApachedruidMetadatacacheInitTime metricApachedruidMetadatacacheInitTime - metricApachedruidMetadatacacheRefreshCount metricApachedruidMetadatacacheRefreshCount - metricApachedruidMetadatacacheRefreshTime metricApachedruidMetadatacacheRefreshTime - metricApachedruidQueryByteLimitExceededCount metricApachedruidQueryByteLimitExceededCount - metricApachedruidQueryBytes metricApachedruidQueryBytes - metricApachedruidQueryCacheDeltaAverageBytes metricApachedruidQueryCacheDeltaAverageBytes - metricApachedruidQueryCacheDeltaErrors metricApachedruidQueryCacheDeltaErrors - metricApachedruidQueryCacheDeltaEvictions metricApachedruidQueryCacheDeltaEvictions - metricApachedruidQueryCacheDeltaHitRate metricApachedruidQueryCacheDeltaHitRate - metricApachedruidQueryCacheDeltaHits metricApachedruidQueryCacheDeltaHits - metricApachedruidQueryCacheDeltaMisses metricApachedruidQueryCacheDeltaMisses - metricApachedruidQueryCacheDeltaNumEntries metricApachedruidQueryCacheDeltaNumEntries - 
metricApachedruidQueryCacheDeltaPutError metricApachedruidQueryCacheDeltaPutError - metricApachedruidQueryCacheDeltaPutOk metricApachedruidQueryCacheDeltaPutOk - metricApachedruidQueryCacheDeltaPutOversized metricApachedruidQueryCacheDeltaPutOversized - metricApachedruidQueryCacheDeltaSizeBytes metricApachedruidQueryCacheDeltaSizeBytes - metricApachedruidQueryCacheDeltaTimeouts metricApachedruidQueryCacheDeltaTimeouts - metricApachedruidQueryCacheMemcachedDelta metricApachedruidQueryCacheMemcachedDelta - metricApachedruidQueryCacheMemcachedTotal metricApachedruidQueryCacheMemcachedTotal - metricApachedruidQueryCacheTotalAverageBytes metricApachedruidQueryCacheTotalAverageBytes - metricApachedruidQueryCacheTotalErrors metricApachedruidQueryCacheTotalErrors - metricApachedruidQueryCacheTotalEvictions metricApachedruidQueryCacheTotalEvictions - metricApachedruidQueryCacheTotalHitRate metricApachedruidQueryCacheTotalHitRate - metricApachedruidQueryCacheTotalHits metricApachedruidQueryCacheTotalHits - metricApachedruidQueryCacheTotalMisses metricApachedruidQueryCacheTotalMisses - metricApachedruidQueryCacheTotalNumEntries metricApachedruidQueryCacheTotalNumEntries - metricApachedruidQueryCacheTotalPutError metricApachedruidQueryCacheTotalPutError - metricApachedruidQueryCacheTotalPutOk metricApachedruidQueryCacheTotalPutOk - metricApachedruidQueryCacheTotalPutOversized metricApachedruidQueryCacheTotalPutOversized - metricApachedruidQueryCacheTotalSizeBytes metricApachedruidQueryCacheTotalSizeBytes - metricApachedruidQueryCacheTotalTimeouts metricApachedruidQueryCacheTotalTimeouts - metricApachedruidQueryCount metricApachedruidQueryCount - metricApachedruidQueryCPUTime metricApachedruidQueryCPUTime - metricApachedruidQueryFailedCount metricApachedruidQueryFailedCount - metricApachedruidQueryInterruptedCount metricApachedruidQueryInterruptedCount - metricApachedruidQueryNodeBackpressure metricApachedruidQueryNodeBackpressure - metricApachedruidQueryNodeBytes metricApachedruidQueryNodeBytes - metricApachedruidQueryNodeTime metricApachedruidQueryNodeTime - metricApachedruidQueryNodeTtfb metricApachedruidQueryNodeTtfb - metricApachedruidQueryPriority metricApachedruidQueryPriority - metricApachedruidQueryRowLimitExceededCount metricApachedruidQueryRowLimitExceededCount - metricApachedruidQuerySegmentTime metricApachedruidQuerySegmentTime - metricApachedruidQuerySegmentAndCacheTime metricApachedruidQuerySegmentAndCacheTime - metricApachedruidQuerySegmentsCount metricApachedruidQuerySegmentsCount - metricApachedruidQuerySuccessCount metricApachedruidQuerySuccessCount - metricApachedruidQueryTime metricApachedruidQueryTime - metricApachedruidQueryTimeoutCount metricApachedruidQueryTimeoutCount - metricApachedruidQueryWaitTime metricApachedruidQueryWaitTime - metricApachedruidSegmentAddedBytes metricApachedruidSegmentAddedBytes - metricApachedruidSegmentAssignSkippedCount metricApachedruidSegmentAssignSkippedCount - metricApachedruidSegmentAssignedCount metricApachedruidSegmentAssignedCount - metricApachedruidSegmentCompactedBytes metricApachedruidSegmentCompactedBytes - metricApachedruidSegmentCompactedCount metricApachedruidSegmentCompactedCount - metricApachedruidSegmentCount metricApachedruidSegmentCount - metricApachedruidSegmentDeletedCount metricApachedruidSegmentDeletedCount - metricApachedruidSegmentDropQueueCount metricApachedruidSegmentDropQueueCount - metricApachedruidSegmentDropSkippedCount metricApachedruidSegmentDropSkippedCount - metricApachedruidSegmentDroppedCount 
metricApachedruidSegmentDroppedCount - metricApachedruidSegmentLoadQueueAssigned metricApachedruidSegmentLoadQueueAssigned - metricApachedruidSegmentLoadQueueCancelled metricApachedruidSegmentLoadQueueCancelled - metricApachedruidSegmentLoadQueueCount metricApachedruidSegmentLoadQueueCount - metricApachedruidSegmentLoadQueueFailed metricApachedruidSegmentLoadQueueFailed - metricApachedruidSegmentLoadQueueSize metricApachedruidSegmentLoadQueueSize - metricApachedruidSegmentLoadQueueSuccess metricApachedruidSegmentLoadQueueSuccess - metricApachedruidSegmentMax metricApachedruidSegmentMax - metricApachedruidSegmentMoveSkippedCount metricApachedruidSegmentMoveSkippedCount - metricApachedruidSegmentMovedBytes metricApachedruidSegmentMovedBytes - metricApachedruidSegmentMovedCount metricApachedruidSegmentMovedCount - metricApachedruidSegmentNukedBytes metricApachedruidSegmentNukedBytes - metricApachedruidSegmentOverShadowedCount metricApachedruidSegmentOverShadowedCount - metricApachedruidSegmentPendingDelete metricApachedruidSegmentPendingDelete - metricApachedruidSegmentRowCountAvg metricApachedruidSegmentRowCountAvg - metricApachedruidSegmentRowCountRangeCount metricApachedruidSegmentRowCountRangeCount - metricApachedruidSegmentScanActive metricApachedruidSegmentScanActive - metricApachedruidSegmentScanPending metricApachedruidSegmentScanPending - metricApachedruidSegmentSize metricApachedruidSegmentSize - metricApachedruidSegmentSkipCompactBytes metricApachedruidSegmentSkipCompactBytes - metricApachedruidSegmentSkipCompactCount metricApachedruidSegmentSkipCompactCount - metricApachedruidSegmentUnavailableCount metricApachedruidSegmentUnavailableCount - metricApachedruidSegmentUnderReplicatedCount metricApachedruidSegmentUnderReplicatedCount - metricApachedruidSegmentUnneededCount metricApachedruidSegmentUnneededCount - metricApachedruidSegmentUsed metricApachedruidSegmentUsed - metricApachedruidSegmentUsedPercent metricApachedruidSegmentUsedPercent - metricApachedruidSegmentWaitCompactBytes metricApachedruidSegmentWaitCompactBytes - metricApachedruidSegmentWaitCompactCount metricApachedruidSegmentWaitCompactCount - metricApachedruidServerviewInitTime metricApachedruidServerviewInitTime - metricApachedruidServerviewSyncHealthy metricApachedruidServerviewSyncHealthy - metricApachedruidServerviewSyncUnstableTime metricApachedruidServerviewSyncUnstableTime - metricApachedruidSQLQueryBytes metricApachedruidSQLQueryBytes - metricApachedruidSQLQueryPlanningTimeMs metricApachedruidSQLQueryPlanningTimeMs - metricApachedruidSQLQueryTime metricApachedruidSQLQueryTime - metricApachedruidSubqueryByteLimitCount metricApachedruidSubqueryByteLimitCount - metricApachedruidSubqueryFallbackCount metricApachedruidSubqueryFallbackCount - metricApachedruidSubqueryFallbackInsufficientTypeCount metricApachedruidSubqueryFallbackInsufficientTypeCount - metricApachedruidSubqueryFallbackUnknownReasonCount metricApachedruidSubqueryFallbackUnknownReasonCount - metricApachedruidSubqueryRowLimitCount metricApachedruidSubqueryRowLimitCount - metricApachedruidSysCPU metricApachedruidSysCPU - metricApachedruidSysDiskQueue metricApachedruidSysDiskQueue - metricApachedruidSysDiskReadCount metricApachedruidSysDiskReadCount - metricApachedruidSysDiskReadSize metricApachedruidSysDiskReadSize - metricApachedruidSysDiskTransferTime metricApachedruidSysDiskTransferTime - metricApachedruidSysDiskWriteCount metricApachedruidSysDiskWriteCount - metricApachedruidSysDiskWriteSize metricApachedruidSysDiskWriteSize - 
metricApachedruidSysFsFilesCount metricApachedruidSysFsFilesCount - metricApachedruidSysFsFilesFree metricApachedruidSysFsFilesFree - metricApachedruidSysFsMax metricApachedruidSysFsMax - metricApachedruidSysFsUsed metricApachedruidSysFsUsed - metricApachedruidSysLa1 metricApachedruidSysLa1 - metricApachedruidSysLa15 metricApachedruidSysLa15 - metricApachedruidSysLa5 metricApachedruidSysLa5 - metricApachedruidSysMemFree metricApachedruidSysMemFree - metricApachedruidSysMemMax metricApachedruidSysMemMax - metricApachedruidSysMemUsed metricApachedruidSysMemUsed - metricApachedruidSysNetReadDropped metricApachedruidSysNetReadDropped - metricApachedruidSysNetReadErrors metricApachedruidSysNetReadErrors - metricApachedruidSysNetReadPackets metricApachedruidSysNetReadPackets - metricApachedruidSysNetReadSize metricApachedruidSysNetReadSize - metricApachedruidSysNetWriteCollisions metricApachedruidSysNetWriteCollisions - metricApachedruidSysNetWriteErrors metricApachedruidSysNetWriteErrors - metricApachedruidSysNetWritePackets metricApachedruidSysNetWritePackets - metricApachedruidSysNetWriteSize metricApachedruidSysNetWriteSize - metricApachedruidSysStorageUsed metricApachedruidSysStorageUsed - metricApachedruidSysSwapFree metricApachedruidSysSwapFree - metricApachedruidSysSwapMax metricApachedruidSysSwapMax - metricApachedruidSysSwapPageIn metricApachedruidSysSwapPageIn - metricApachedruidSysSwapPageOut metricApachedruidSysSwapPageOut - metricApachedruidSysTcpv4ActiveOpens metricApachedruidSysTcpv4ActiveOpens - metricApachedruidSysTcpv4AttemptFails metricApachedruidSysTcpv4AttemptFails - metricApachedruidSysTcpv4EstabResets metricApachedruidSysTcpv4EstabResets - metricApachedruidSysTcpv4InErrs metricApachedruidSysTcpv4InErrs - metricApachedruidSysTcpv4InSegs metricApachedruidSysTcpv4InSegs - metricApachedruidSysTcpv4OutRsts metricApachedruidSysTcpv4OutRsts - metricApachedruidSysTcpv4OutSegs metricApachedruidSysTcpv4OutSegs - metricApachedruidSysTcpv4PassiveOpens metricApachedruidSysTcpv4PassiveOpens - metricApachedruidSysTcpv4RetransSegs metricApachedruidSysTcpv4RetransSegs - metricApachedruidSysUptime metricApachedruidSysUptime - metricApachedruidTaskActionBatchAttempts metricApachedruidTaskActionBatchAttempts - metricApachedruidTaskActionBatchQueueTime metricApachedruidTaskActionBatchQueueTime - metricApachedruidTaskActionBatchRunTime metricApachedruidTaskActionBatchRunTime - metricApachedruidTaskActionBatchSize metricApachedruidTaskActionBatchSize - metricApachedruidTaskActionFailedCount metricApachedruidTaskActionFailedCount - metricApachedruidTaskActionLogTime metricApachedruidTaskActionLogTime - metricApachedruidTaskActionRunTime metricApachedruidTaskActionRunTime - metricApachedruidTaskActionSuccessCount metricApachedruidTaskActionSuccessCount - metricApachedruidTaskFailedCount metricApachedruidTaskFailedCount - metricApachedruidTaskPendingCount metricApachedruidTaskPendingCount - metricApachedruidTaskPendingTime metricApachedruidTaskPendingTime - metricApachedruidTaskRunTime metricApachedruidTaskRunTime - metricApachedruidTaskRunningCount metricApachedruidTaskRunningCount - metricApachedruidTaskSegmentAvailabilityWaitTime metricApachedruidTaskSegmentAvailabilityWaitTime - metricApachedruidTaskSuccessCount metricApachedruidTaskSuccessCount - metricApachedruidTaskWaitingCount metricApachedruidTaskWaitingCount - metricApachedruidTaskSlotBlacklistedCount metricApachedruidTaskSlotBlacklistedCount - metricApachedruidTaskSlotIdleCount metricApachedruidTaskSlotIdleCount - 
metricApachedruidTaskSlotLazyCount metricApachedruidTaskSlotLazyCount - metricApachedruidTaskSlotTotalCount metricApachedruidTaskSlotTotalCount - metricApachedruidTaskSlotUsedCount metricApachedruidTaskSlotUsedCount - metricApachedruidTierHistoricalCount metricApachedruidTierHistoricalCount - metricApachedruidTierReplicationFactor metricApachedruidTierReplicationFactor - metricApachedruidTierRequiredCapacity metricApachedruidTierRequiredCapacity - metricApachedruidTierTotalCapacity metricApachedruidTierTotalCapacity - metricApachedruidWorkerTaskFailedCount metricApachedruidWorkerTaskFailedCount - metricApachedruidWorkerTaskSuccessCount metricApachedruidWorkerTaskSuccessCount - metricApachedruidWorkerTaskSlotIdleCount metricApachedruidWorkerTaskSlotIdleCount - metricApachedruidWorkerTaskSlotTotalCount metricApachedruidWorkerTaskSlotTotalCount - metricApachedruidWorkerTaskSlotUsedCount metricApachedruidWorkerTaskSlotUsedCount - metricApachedruidZkConnected metricApachedruidZkConnected - metricApachedruidZkReconnectTime metricApachedruidZkReconnectTime -} - -// metricBuilderOption applies changes to default metrics builder. -type metricBuilderOption func(*MetricsBuilder) - -// WithStartTime sets startTime on the metrics builder. -func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { - return func(mb *MetricsBuilder) { - mb.startTime = startTime - } -} - -func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSettings, options ...metricBuilderOption) *MetricsBuilder { - mb := &MetricsBuilder{ - config: mbc, - startTime: pcommon.NewTimestampFromTime(time.Now()), - metricsBuffer: pmetric.NewMetrics(), - buildInfo: settings.BuildInfo, - metricApachedruidCompactSegmentAnalyzerFetchAndProcessMillis: newMetricApachedruidCompactSegmentAnalyzerFetchAndProcessMillis(mbc.Metrics.ApachedruidCompactSegmentAnalyzerFetchAndProcessMillis), - metricApachedruidCompactTaskCount: newMetricApachedruidCompactTaskCount(mbc.Metrics.ApachedruidCompactTaskCount), - metricApachedruidCompactTaskAvailableSlotCount: newMetricApachedruidCompactTaskAvailableSlotCount(mbc.Metrics.ApachedruidCompactTaskAvailableSlotCount), - metricApachedruidCompactTaskMaxSlotCount: newMetricApachedruidCompactTaskMaxSlotCount(mbc.Metrics.ApachedruidCompactTaskMaxSlotCount), - metricApachedruidCoordinatorGlobalTime: newMetricApachedruidCoordinatorGlobalTime(mbc.Metrics.ApachedruidCoordinatorGlobalTime), - metricApachedruidCoordinatorTime: newMetricApachedruidCoordinatorTime(mbc.Metrics.ApachedruidCoordinatorTime), - metricApachedruidIngestBytesReceived: newMetricApachedruidIngestBytesReceived(mbc.Metrics.ApachedruidIngestBytesReceived), - metricApachedruidIngestCount: newMetricApachedruidIngestCount(mbc.Metrics.ApachedruidIngestCount), - metricApachedruidIngestEventsBuffered: newMetricApachedruidIngestEventsBuffered(mbc.Metrics.ApachedruidIngestEventsBuffered), - metricApachedruidIngestEventsDuplicate: newMetricApachedruidIngestEventsDuplicate(mbc.Metrics.ApachedruidIngestEventsDuplicate), - metricApachedruidIngestEventsMessageGap: newMetricApachedruidIngestEventsMessageGap(mbc.Metrics.ApachedruidIngestEventsMessageGap), - metricApachedruidIngestEventsProcessed: newMetricApachedruidIngestEventsProcessed(mbc.Metrics.ApachedruidIngestEventsProcessed), - metricApachedruidIngestEventsProcessedWithError: newMetricApachedruidIngestEventsProcessedWithError(mbc.Metrics.ApachedruidIngestEventsProcessedWithError), - metricApachedruidIngestEventsThrownAway: 
newMetricApachedruidIngestEventsThrownAway(mbc.Metrics.ApachedruidIngestEventsThrownAway), - metricApachedruidIngestEventsUnparseable: newMetricApachedruidIngestEventsUnparseable(mbc.Metrics.ApachedruidIngestEventsUnparseable), - metricApachedruidIngestHandoffCount: newMetricApachedruidIngestHandoffCount(mbc.Metrics.ApachedruidIngestHandoffCount), - metricApachedruidIngestHandoffFailed: newMetricApachedruidIngestHandoffFailed(mbc.Metrics.ApachedruidIngestHandoffFailed), - metricApachedruidIngestHandoffTime: newMetricApachedruidIngestHandoffTime(mbc.Metrics.ApachedruidIngestHandoffTime), - metricApachedruidIngestInputBytes: newMetricApachedruidIngestInputBytes(mbc.Metrics.ApachedruidIngestInputBytes), - metricApachedruidIngestKafkaAvgLag: newMetricApachedruidIngestKafkaAvgLag(mbc.Metrics.ApachedruidIngestKafkaAvgLag), - metricApachedruidIngestKafkaLag: newMetricApachedruidIngestKafkaLag(mbc.Metrics.ApachedruidIngestKafkaLag), - metricApachedruidIngestKafkaMaxLag: newMetricApachedruidIngestKafkaMaxLag(mbc.Metrics.ApachedruidIngestKafkaMaxLag), - metricApachedruidIngestKafkaPartitionLag: newMetricApachedruidIngestKafkaPartitionLag(mbc.Metrics.ApachedruidIngestKafkaPartitionLag), - metricApachedruidIngestKinesisAvgLagTime: newMetricApachedruidIngestKinesisAvgLagTime(mbc.Metrics.ApachedruidIngestKinesisAvgLagTime), - metricApachedruidIngestKinesisLagTime: newMetricApachedruidIngestKinesisLagTime(mbc.Metrics.ApachedruidIngestKinesisLagTime), - metricApachedruidIngestKinesisMaxLagTime: newMetricApachedruidIngestKinesisMaxLagTime(mbc.Metrics.ApachedruidIngestKinesisMaxLagTime), - metricApachedruidIngestKinesisPartitionLagTime: newMetricApachedruidIngestKinesisPartitionLagTime(mbc.Metrics.ApachedruidIngestKinesisPartitionLagTime), - metricApachedruidIngestMergeCPU: newMetricApachedruidIngestMergeCPU(mbc.Metrics.ApachedruidIngestMergeCPU), - metricApachedruidIngestMergeTime: newMetricApachedruidIngestMergeTime(mbc.Metrics.ApachedruidIngestMergeTime), - metricApachedruidIngestNoticesQueueSize: newMetricApachedruidIngestNoticesQueueSize(mbc.Metrics.ApachedruidIngestNoticesQueueSize), - metricApachedruidIngestNoticesTime: newMetricApachedruidIngestNoticesTime(mbc.Metrics.ApachedruidIngestNoticesTime), - metricApachedruidIngestPauseTime: newMetricApachedruidIngestPauseTime(mbc.Metrics.ApachedruidIngestPauseTime), - metricApachedruidIngestPersistsBackPressure: newMetricApachedruidIngestPersistsBackPressure(mbc.Metrics.ApachedruidIngestPersistsBackPressure), - metricApachedruidIngestPersistsCount: newMetricApachedruidIngestPersistsCount(mbc.Metrics.ApachedruidIngestPersistsCount), - metricApachedruidIngestPersistsCPU: newMetricApachedruidIngestPersistsCPU(mbc.Metrics.ApachedruidIngestPersistsCPU), - metricApachedruidIngestPersistsFailed: newMetricApachedruidIngestPersistsFailed(mbc.Metrics.ApachedruidIngestPersistsFailed), - metricApachedruidIngestPersistsTime: newMetricApachedruidIngestPersistsTime(mbc.Metrics.ApachedruidIngestPersistsTime), - metricApachedruidIngestRowsOutput: newMetricApachedruidIngestRowsOutput(mbc.Metrics.ApachedruidIngestRowsOutput), - metricApachedruidIngestSegmentsCount: newMetricApachedruidIngestSegmentsCount(mbc.Metrics.ApachedruidIngestSegmentsCount), - metricApachedruidIngestShuffleBytes: newMetricApachedruidIngestShuffleBytes(mbc.Metrics.ApachedruidIngestShuffleBytes), - metricApachedruidIngestShuffleRequests: newMetricApachedruidIngestShuffleRequests(mbc.Metrics.ApachedruidIngestShuffleRequests), - metricApachedruidIngestSinkCount: 
newMetricApachedruidIngestSinkCount(mbc.Metrics.ApachedruidIngestSinkCount), - metricApachedruidIngestTombstonesCount: newMetricApachedruidIngestTombstonesCount(mbc.Metrics.ApachedruidIngestTombstonesCount), - metricApachedruidIntervalCompactedCount: newMetricApachedruidIntervalCompactedCount(mbc.Metrics.ApachedruidIntervalCompactedCount), - metricApachedruidIntervalSkipCompactCount: newMetricApachedruidIntervalSkipCompactCount(mbc.Metrics.ApachedruidIntervalSkipCompactCount), - metricApachedruidIntervalWaitCompactCount: newMetricApachedruidIntervalWaitCompactCount(mbc.Metrics.ApachedruidIntervalWaitCompactCount), - metricApachedruidJettyNumOpenConnections: newMetricApachedruidJettyNumOpenConnections(mbc.Metrics.ApachedruidJettyNumOpenConnections), - metricApachedruidJettyThreadPoolBusy: newMetricApachedruidJettyThreadPoolBusy(mbc.Metrics.ApachedruidJettyThreadPoolBusy), - metricApachedruidJettyThreadPoolIdle: newMetricApachedruidJettyThreadPoolIdle(mbc.Metrics.ApachedruidJettyThreadPoolIdle), - metricApachedruidJettyThreadPoolIsLowOnThreads: newMetricApachedruidJettyThreadPoolIsLowOnThreads(mbc.Metrics.ApachedruidJettyThreadPoolIsLowOnThreads), - metricApachedruidJettyThreadPoolMax: newMetricApachedruidJettyThreadPoolMax(mbc.Metrics.ApachedruidJettyThreadPoolMax), - metricApachedruidJettyThreadPoolMin: newMetricApachedruidJettyThreadPoolMin(mbc.Metrics.ApachedruidJettyThreadPoolMin), - metricApachedruidJettyThreadPoolQueueSize: newMetricApachedruidJettyThreadPoolQueueSize(mbc.Metrics.ApachedruidJettyThreadPoolQueueSize), - metricApachedruidJettyThreadPoolTotal: newMetricApachedruidJettyThreadPoolTotal(mbc.Metrics.ApachedruidJettyThreadPoolTotal), - metricApachedruidJvmBufferpoolCapacity: newMetricApachedruidJvmBufferpoolCapacity(mbc.Metrics.ApachedruidJvmBufferpoolCapacity), - metricApachedruidJvmBufferpoolCount: newMetricApachedruidJvmBufferpoolCount(mbc.Metrics.ApachedruidJvmBufferpoolCount), - metricApachedruidJvmBufferpoolUsed: newMetricApachedruidJvmBufferpoolUsed(mbc.Metrics.ApachedruidJvmBufferpoolUsed), - metricApachedruidJvmGcCount: newMetricApachedruidJvmGcCount(mbc.Metrics.ApachedruidJvmGcCount), - metricApachedruidJvmGcCPU: newMetricApachedruidJvmGcCPU(mbc.Metrics.ApachedruidJvmGcCPU), - metricApachedruidJvmMemCommitted: newMetricApachedruidJvmMemCommitted(mbc.Metrics.ApachedruidJvmMemCommitted), - metricApachedruidJvmMemInit: newMetricApachedruidJvmMemInit(mbc.Metrics.ApachedruidJvmMemInit), - metricApachedruidJvmMemMax: newMetricApachedruidJvmMemMax(mbc.Metrics.ApachedruidJvmMemMax), - metricApachedruidJvmMemUsed: newMetricApachedruidJvmMemUsed(mbc.Metrics.ApachedruidJvmMemUsed), - metricApachedruidJvmPoolCommitted: newMetricApachedruidJvmPoolCommitted(mbc.Metrics.ApachedruidJvmPoolCommitted), - metricApachedruidJvmPoolInit: newMetricApachedruidJvmPoolInit(mbc.Metrics.ApachedruidJvmPoolInit), - metricApachedruidJvmPoolMax: newMetricApachedruidJvmPoolMax(mbc.Metrics.ApachedruidJvmPoolMax), - metricApachedruidJvmPoolUsed: newMetricApachedruidJvmPoolUsed(mbc.Metrics.ApachedruidJvmPoolUsed), - metricApachedruidKillPendingSegmentsCount: newMetricApachedruidKillPendingSegmentsCount(mbc.Metrics.ApachedruidKillPendingSegmentsCount), - metricApachedruidKillTaskCount: newMetricApachedruidKillTaskCount(mbc.Metrics.ApachedruidKillTaskCount), - metricApachedruidKillTaskAvailableSlotCount: newMetricApachedruidKillTaskAvailableSlotCount(mbc.Metrics.ApachedruidKillTaskAvailableSlotCount), - metricApachedruidKillTaskMaxSlotCount: 
newMetricApachedruidKillTaskMaxSlotCount(mbc.Metrics.ApachedruidKillTaskMaxSlotCount), - metricApachedruidMergeBufferPendingRequests: newMetricApachedruidMergeBufferPendingRequests(mbc.Metrics.ApachedruidMergeBufferPendingRequests), - metricApachedruidMetadataKillAuditCount: newMetricApachedruidMetadataKillAuditCount(mbc.Metrics.ApachedruidMetadataKillAuditCount), - metricApachedruidMetadataKillCompactionCount: newMetricApachedruidMetadataKillCompactionCount(mbc.Metrics.ApachedruidMetadataKillCompactionCount), - metricApachedruidMetadataKillDatasourceCount: newMetricApachedruidMetadataKillDatasourceCount(mbc.Metrics.ApachedruidMetadataKillDatasourceCount), - metricApachedruidMetadataKillRuleCount: newMetricApachedruidMetadataKillRuleCount(mbc.Metrics.ApachedruidMetadataKillRuleCount), - metricApachedruidMetadataKillSupervisorCount: newMetricApachedruidMetadataKillSupervisorCount(mbc.Metrics.ApachedruidMetadataKillSupervisorCount), - metricApachedruidMetadatacacheInitTime: newMetricApachedruidMetadatacacheInitTime(mbc.Metrics.ApachedruidMetadatacacheInitTime), - metricApachedruidMetadatacacheRefreshCount: newMetricApachedruidMetadatacacheRefreshCount(mbc.Metrics.ApachedruidMetadatacacheRefreshCount), - metricApachedruidMetadatacacheRefreshTime: newMetricApachedruidMetadatacacheRefreshTime(mbc.Metrics.ApachedruidMetadatacacheRefreshTime), - metricApachedruidQueryByteLimitExceededCount: newMetricApachedruidQueryByteLimitExceededCount(mbc.Metrics.ApachedruidQueryByteLimitExceededCount), - metricApachedruidQueryBytes: newMetricApachedruidQueryBytes(mbc.Metrics.ApachedruidQueryBytes), - metricApachedruidQueryCacheDeltaAverageBytes: newMetricApachedruidQueryCacheDeltaAverageBytes(mbc.Metrics.ApachedruidQueryCacheDeltaAverageBytes), - metricApachedruidQueryCacheDeltaErrors: newMetricApachedruidQueryCacheDeltaErrors(mbc.Metrics.ApachedruidQueryCacheDeltaErrors), - metricApachedruidQueryCacheDeltaEvictions: newMetricApachedruidQueryCacheDeltaEvictions(mbc.Metrics.ApachedruidQueryCacheDeltaEvictions), - metricApachedruidQueryCacheDeltaHitRate: newMetricApachedruidQueryCacheDeltaHitRate(mbc.Metrics.ApachedruidQueryCacheDeltaHitRate), - metricApachedruidQueryCacheDeltaHits: newMetricApachedruidQueryCacheDeltaHits(mbc.Metrics.ApachedruidQueryCacheDeltaHits), - metricApachedruidQueryCacheDeltaMisses: newMetricApachedruidQueryCacheDeltaMisses(mbc.Metrics.ApachedruidQueryCacheDeltaMisses), - metricApachedruidQueryCacheDeltaNumEntries: newMetricApachedruidQueryCacheDeltaNumEntries(mbc.Metrics.ApachedruidQueryCacheDeltaNumEntries), - metricApachedruidQueryCacheDeltaPutError: newMetricApachedruidQueryCacheDeltaPutError(mbc.Metrics.ApachedruidQueryCacheDeltaPutError), - metricApachedruidQueryCacheDeltaPutOk: newMetricApachedruidQueryCacheDeltaPutOk(mbc.Metrics.ApachedruidQueryCacheDeltaPutOk), - metricApachedruidQueryCacheDeltaPutOversized: newMetricApachedruidQueryCacheDeltaPutOversized(mbc.Metrics.ApachedruidQueryCacheDeltaPutOversized), - metricApachedruidQueryCacheDeltaSizeBytes: newMetricApachedruidQueryCacheDeltaSizeBytes(mbc.Metrics.ApachedruidQueryCacheDeltaSizeBytes), - metricApachedruidQueryCacheDeltaTimeouts: newMetricApachedruidQueryCacheDeltaTimeouts(mbc.Metrics.ApachedruidQueryCacheDeltaTimeouts), - metricApachedruidQueryCacheMemcachedDelta: newMetricApachedruidQueryCacheMemcachedDelta(mbc.Metrics.ApachedruidQueryCacheMemcachedDelta), - metricApachedruidQueryCacheMemcachedTotal: newMetricApachedruidQueryCacheMemcachedTotal(mbc.Metrics.ApachedruidQueryCacheMemcachedTotal), - 
metricApachedruidQueryCacheTotalAverageBytes: newMetricApachedruidQueryCacheTotalAverageBytes(mbc.Metrics.ApachedruidQueryCacheTotalAverageBytes), - metricApachedruidQueryCacheTotalErrors: newMetricApachedruidQueryCacheTotalErrors(mbc.Metrics.ApachedruidQueryCacheTotalErrors), - metricApachedruidQueryCacheTotalEvictions: newMetricApachedruidQueryCacheTotalEvictions(mbc.Metrics.ApachedruidQueryCacheTotalEvictions), - metricApachedruidQueryCacheTotalHitRate: newMetricApachedruidQueryCacheTotalHitRate(mbc.Metrics.ApachedruidQueryCacheTotalHitRate), - metricApachedruidQueryCacheTotalHits: newMetricApachedruidQueryCacheTotalHits(mbc.Metrics.ApachedruidQueryCacheTotalHits), - metricApachedruidQueryCacheTotalMisses: newMetricApachedruidQueryCacheTotalMisses(mbc.Metrics.ApachedruidQueryCacheTotalMisses), - metricApachedruidQueryCacheTotalNumEntries: newMetricApachedruidQueryCacheTotalNumEntries(mbc.Metrics.ApachedruidQueryCacheTotalNumEntries), - metricApachedruidQueryCacheTotalPutError: newMetricApachedruidQueryCacheTotalPutError(mbc.Metrics.ApachedruidQueryCacheTotalPutError), - metricApachedruidQueryCacheTotalPutOk: newMetricApachedruidQueryCacheTotalPutOk(mbc.Metrics.ApachedruidQueryCacheTotalPutOk), - metricApachedruidQueryCacheTotalPutOversized: newMetricApachedruidQueryCacheTotalPutOversized(mbc.Metrics.ApachedruidQueryCacheTotalPutOversized), - metricApachedruidQueryCacheTotalSizeBytes: newMetricApachedruidQueryCacheTotalSizeBytes(mbc.Metrics.ApachedruidQueryCacheTotalSizeBytes), - metricApachedruidQueryCacheTotalTimeouts: newMetricApachedruidQueryCacheTotalTimeouts(mbc.Metrics.ApachedruidQueryCacheTotalTimeouts), - metricApachedruidQueryCount: newMetricApachedruidQueryCount(mbc.Metrics.ApachedruidQueryCount), - metricApachedruidQueryCPUTime: newMetricApachedruidQueryCPUTime(mbc.Metrics.ApachedruidQueryCPUTime), - metricApachedruidQueryFailedCount: newMetricApachedruidQueryFailedCount(mbc.Metrics.ApachedruidQueryFailedCount), - metricApachedruidQueryInterruptedCount: newMetricApachedruidQueryInterruptedCount(mbc.Metrics.ApachedruidQueryInterruptedCount), - metricApachedruidQueryNodeBackpressure: newMetricApachedruidQueryNodeBackpressure(mbc.Metrics.ApachedruidQueryNodeBackpressure), - metricApachedruidQueryNodeBytes: newMetricApachedruidQueryNodeBytes(mbc.Metrics.ApachedruidQueryNodeBytes), - metricApachedruidQueryNodeTime: newMetricApachedruidQueryNodeTime(mbc.Metrics.ApachedruidQueryNodeTime), - metricApachedruidQueryNodeTtfb: newMetricApachedruidQueryNodeTtfb(mbc.Metrics.ApachedruidQueryNodeTtfb), - metricApachedruidQueryPriority: newMetricApachedruidQueryPriority(mbc.Metrics.ApachedruidQueryPriority), - metricApachedruidQueryRowLimitExceededCount: newMetricApachedruidQueryRowLimitExceededCount(mbc.Metrics.ApachedruidQueryRowLimitExceededCount), - metricApachedruidQuerySegmentTime: newMetricApachedruidQuerySegmentTime(mbc.Metrics.ApachedruidQuerySegmentTime), - metricApachedruidQuerySegmentAndCacheTime: newMetricApachedruidQuerySegmentAndCacheTime(mbc.Metrics.ApachedruidQuerySegmentAndCacheTime), - metricApachedruidQuerySegmentsCount: newMetricApachedruidQuerySegmentsCount(mbc.Metrics.ApachedruidQuerySegmentsCount), - metricApachedruidQuerySuccessCount: newMetricApachedruidQuerySuccessCount(mbc.Metrics.ApachedruidQuerySuccessCount), - metricApachedruidQueryTime: newMetricApachedruidQueryTime(mbc.Metrics.ApachedruidQueryTime), - metricApachedruidQueryTimeoutCount: newMetricApachedruidQueryTimeoutCount(mbc.Metrics.ApachedruidQueryTimeoutCount), - metricApachedruidQueryWaitTime: 
newMetricApachedruidQueryWaitTime(mbc.Metrics.ApachedruidQueryWaitTime), - metricApachedruidSegmentAddedBytes: newMetricApachedruidSegmentAddedBytes(mbc.Metrics.ApachedruidSegmentAddedBytes), - metricApachedruidSegmentAssignSkippedCount: newMetricApachedruidSegmentAssignSkippedCount(mbc.Metrics.ApachedruidSegmentAssignSkippedCount), - metricApachedruidSegmentAssignedCount: newMetricApachedruidSegmentAssignedCount(mbc.Metrics.ApachedruidSegmentAssignedCount), - metricApachedruidSegmentCompactedBytes: newMetricApachedruidSegmentCompactedBytes(mbc.Metrics.ApachedruidSegmentCompactedBytes), - metricApachedruidSegmentCompactedCount: newMetricApachedruidSegmentCompactedCount(mbc.Metrics.ApachedruidSegmentCompactedCount), - metricApachedruidSegmentCount: newMetricApachedruidSegmentCount(mbc.Metrics.ApachedruidSegmentCount), - metricApachedruidSegmentDeletedCount: newMetricApachedruidSegmentDeletedCount(mbc.Metrics.ApachedruidSegmentDeletedCount), - metricApachedruidSegmentDropQueueCount: newMetricApachedruidSegmentDropQueueCount(mbc.Metrics.ApachedruidSegmentDropQueueCount), - metricApachedruidSegmentDropSkippedCount: newMetricApachedruidSegmentDropSkippedCount(mbc.Metrics.ApachedruidSegmentDropSkippedCount), - metricApachedruidSegmentDroppedCount: newMetricApachedruidSegmentDroppedCount(mbc.Metrics.ApachedruidSegmentDroppedCount), - metricApachedruidSegmentLoadQueueAssigned: newMetricApachedruidSegmentLoadQueueAssigned(mbc.Metrics.ApachedruidSegmentLoadQueueAssigned), - metricApachedruidSegmentLoadQueueCancelled: newMetricApachedruidSegmentLoadQueueCancelled(mbc.Metrics.ApachedruidSegmentLoadQueueCancelled), - metricApachedruidSegmentLoadQueueCount: newMetricApachedruidSegmentLoadQueueCount(mbc.Metrics.ApachedruidSegmentLoadQueueCount), - metricApachedruidSegmentLoadQueueFailed: newMetricApachedruidSegmentLoadQueueFailed(mbc.Metrics.ApachedruidSegmentLoadQueueFailed), - metricApachedruidSegmentLoadQueueSize: newMetricApachedruidSegmentLoadQueueSize(mbc.Metrics.ApachedruidSegmentLoadQueueSize), - metricApachedruidSegmentLoadQueueSuccess: newMetricApachedruidSegmentLoadQueueSuccess(mbc.Metrics.ApachedruidSegmentLoadQueueSuccess), - metricApachedruidSegmentMax: newMetricApachedruidSegmentMax(mbc.Metrics.ApachedruidSegmentMax), - metricApachedruidSegmentMoveSkippedCount: newMetricApachedruidSegmentMoveSkippedCount(mbc.Metrics.ApachedruidSegmentMoveSkippedCount), - metricApachedruidSegmentMovedBytes: newMetricApachedruidSegmentMovedBytes(mbc.Metrics.ApachedruidSegmentMovedBytes), - metricApachedruidSegmentMovedCount: newMetricApachedruidSegmentMovedCount(mbc.Metrics.ApachedruidSegmentMovedCount), - metricApachedruidSegmentNukedBytes: newMetricApachedruidSegmentNukedBytes(mbc.Metrics.ApachedruidSegmentNukedBytes), - metricApachedruidSegmentOverShadowedCount: newMetricApachedruidSegmentOverShadowedCount(mbc.Metrics.ApachedruidSegmentOverShadowedCount), - metricApachedruidSegmentPendingDelete: newMetricApachedruidSegmentPendingDelete(mbc.Metrics.ApachedruidSegmentPendingDelete), - metricApachedruidSegmentRowCountAvg: newMetricApachedruidSegmentRowCountAvg(mbc.Metrics.ApachedruidSegmentRowCountAvg), - metricApachedruidSegmentRowCountRangeCount: newMetricApachedruidSegmentRowCountRangeCount(mbc.Metrics.ApachedruidSegmentRowCountRangeCount), - metricApachedruidSegmentScanActive: newMetricApachedruidSegmentScanActive(mbc.Metrics.ApachedruidSegmentScanActive), - metricApachedruidSegmentScanPending: newMetricApachedruidSegmentScanPending(mbc.Metrics.ApachedruidSegmentScanPending), - 
metricApachedruidSegmentSize: newMetricApachedruidSegmentSize(mbc.Metrics.ApachedruidSegmentSize), - metricApachedruidSegmentSkipCompactBytes: newMetricApachedruidSegmentSkipCompactBytes(mbc.Metrics.ApachedruidSegmentSkipCompactBytes), - metricApachedruidSegmentSkipCompactCount: newMetricApachedruidSegmentSkipCompactCount(mbc.Metrics.ApachedruidSegmentSkipCompactCount), - metricApachedruidSegmentUnavailableCount: newMetricApachedruidSegmentUnavailableCount(mbc.Metrics.ApachedruidSegmentUnavailableCount), - metricApachedruidSegmentUnderReplicatedCount: newMetricApachedruidSegmentUnderReplicatedCount(mbc.Metrics.ApachedruidSegmentUnderReplicatedCount), - metricApachedruidSegmentUnneededCount: newMetricApachedruidSegmentUnneededCount(mbc.Metrics.ApachedruidSegmentUnneededCount), - metricApachedruidSegmentUsed: newMetricApachedruidSegmentUsed(mbc.Metrics.ApachedruidSegmentUsed), - metricApachedruidSegmentUsedPercent: newMetricApachedruidSegmentUsedPercent(mbc.Metrics.ApachedruidSegmentUsedPercent), - metricApachedruidSegmentWaitCompactBytes: newMetricApachedruidSegmentWaitCompactBytes(mbc.Metrics.ApachedruidSegmentWaitCompactBytes), - metricApachedruidSegmentWaitCompactCount: newMetricApachedruidSegmentWaitCompactCount(mbc.Metrics.ApachedruidSegmentWaitCompactCount), - metricApachedruidServerviewInitTime: newMetricApachedruidServerviewInitTime(mbc.Metrics.ApachedruidServerviewInitTime), - metricApachedruidServerviewSyncHealthy: newMetricApachedruidServerviewSyncHealthy(mbc.Metrics.ApachedruidServerviewSyncHealthy), - metricApachedruidServerviewSyncUnstableTime: newMetricApachedruidServerviewSyncUnstableTime(mbc.Metrics.ApachedruidServerviewSyncUnstableTime), - metricApachedruidSQLQueryBytes: newMetricApachedruidSQLQueryBytes(mbc.Metrics.ApachedruidSQLQueryBytes), - metricApachedruidSQLQueryPlanningTimeMs: newMetricApachedruidSQLQueryPlanningTimeMs(mbc.Metrics.ApachedruidSQLQueryPlanningTimeMs), - metricApachedruidSQLQueryTime: newMetricApachedruidSQLQueryTime(mbc.Metrics.ApachedruidSQLQueryTime), - metricApachedruidSubqueryByteLimitCount: newMetricApachedruidSubqueryByteLimitCount(mbc.Metrics.ApachedruidSubqueryByteLimitCount), - metricApachedruidSubqueryFallbackCount: newMetricApachedruidSubqueryFallbackCount(mbc.Metrics.ApachedruidSubqueryFallbackCount), - metricApachedruidSubqueryFallbackInsufficientTypeCount: newMetricApachedruidSubqueryFallbackInsufficientTypeCount(mbc.Metrics.ApachedruidSubqueryFallbackInsufficientTypeCount), - metricApachedruidSubqueryFallbackUnknownReasonCount: newMetricApachedruidSubqueryFallbackUnknownReasonCount(mbc.Metrics.ApachedruidSubqueryFallbackUnknownReasonCount), - metricApachedruidSubqueryRowLimitCount: newMetricApachedruidSubqueryRowLimitCount(mbc.Metrics.ApachedruidSubqueryRowLimitCount), - metricApachedruidSysCPU: newMetricApachedruidSysCPU(mbc.Metrics.ApachedruidSysCPU), - metricApachedruidSysDiskQueue: newMetricApachedruidSysDiskQueue(mbc.Metrics.ApachedruidSysDiskQueue), - metricApachedruidSysDiskReadCount: newMetricApachedruidSysDiskReadCount(mbc.Metrics.ApachedruidSysDiskReadCount), - metricApachedruidSysDiskReadSize: newMetricApachedruidSysDiskReadSize(mbc.Metrics.ApachedruidSysDiskReadSize), - metricApachedruidSysDiskTransferTime: newMetricApachedruidSysDiskTransferTime(mbc.Metrics.ApachedruidSysDiskTransferTime), - metricApachedruidSysDiskWriteCount: newMetricApachedruidSysDiskWriteCount(mbc.Metrics.ApachedruidSysDiskWriteCount), - metricApachedruidSysDiskWriteSize: newMetricApachedruidSysDiskWriteSize(mbc.Metrics.ApachedruidSysDiskWriteSize), - 
metricApachedruidSysFsFilesCount: newMetricApachedruidSysFsFilesCount(mbc.Metrics.ApachedruidSysFsFilesCount), - metricApachedruidSysFsFilesFree: newMetricApachedruidSysFsFilesFree(mbc.Metrics.ApachedruidSysFsFilesFree), - metricApachedruidSysFsMax: newMetricApachedruidSysFsMax(mbc.Metrics.ApachedruidSysFsMax), - metricApachedruidSysFsUsed: newMetricApachedruidSysFsUsed(mbc.Metrics.ApachedruidSysFsUsed), - metricApachedruidSysLa1: newMetricApachedruidSysLa1(mbc.Metrics.ApachedruidSysLa1), - metricApachedruidSysLa15: newMetricApachedruidSysLa15(mbc.Metrics.ApachedruidSysLa15), - metricApachedruidSysLa5: newMetricApachedruidSysLa5(mbc.Metrics.ApachedruidSysLa5), - metricApachedruidSysMemFree: newMetricApachedruidSysMemFree(mbc.Metrics.ApachedruidSysMemFree), - metricApachedruidSysMemMax: newMetricApachedruidSysMemMax(mbc.Metrics.ApachedruidSysMemMax), - metricApachedruidSysMemUsed: newMetricApachedruidSysMemUsed(mbc.Metrics.ApachedruidSysMemUsed), - metricApachedruidSysNetReadDropped: newMetricApachedruidSysNetReadDropped(mbc.Metrics.ApachedruidSysNetReadDropped), - metricApachedruidSysNetReadErrors: newMetricApachedruidSysNetReadErrors(mbc.Metrics.ApachedruidSysNetReadErrors), - metricApachedruidSysNetReadPackets: newMetricApachedruidSysNetReadPackets(mbc.Metrics.ApachedruidSysNetReadPackets), - metricApachedruidSysNetReadSize: newMetricApachedruidSysNetReadSize(mbc.Metrics.ApachedruidSysNetReadSize), - metricApachedruidSysNetWriteCollisions: newMetricApachedruidSysNetWriteCollisions(mbc.Metrics.ApachedruidSysNetWriteCollisions), - metricApachedruidSysNetWriteErrors: newMetricApachedruidSysNetWriteErrors(mbc.Metrics.ApachedruidSysNetWriteErrors), - metricApachedruidSysNetWritePackets: newMetricApachedruidSysNetWritePackets(mbc.Metrics.ApachedruidSysNetWritePackets), - metricApachedruidSysNetWriteSize: newMetricApachedruidSysNetWriteSize(mbc.Metrics.ApachedruidSysNetWriteSize), - metricApachedruidSysStorageUsed: newMetricApachedruidSysStorageUsed(mbc.Metrics.ApachedruidSysStorageUsed), - metricApachedruidSysSwapFree: newMetricApachedruidSysSwapFree(mbc.Metrics.ApachedruidSysSwapFree), - metricApachedruidSysSwapMax: newMetricApachedruidSysSwapMax(mbc.Metrics.ApachedruidSysSwapMax), - metricApachedruidSysSwapPageIn: newMetricApachedruidSysSwapPageIn(mbc.Metrics.ApachedruidSysSwapPageIn), - metricApachedruidSysSwapPageOut: newMetricApachedruidSysSwapPageOut(mbc.Metrics.ApachedruidSysSwapPageOut), - metricApachedruidSysTcpv4ActiveOpens: newMetricApachedruidSysTcpv4ActiveOpens(mbc.Metrics.ApachedruidSysTcpv4ActiveOpens), - metricApachedruidSysTcpv4AttemptFails: newMetricApachedruidSysTcpv4AttemptFails(mbc.Metrics.ApachedruidSysTcpv4AttemptFails), - metricApachedruidSysTcpv4EstabResets: newMetricApachedruidSysTcpv4EstabResets(mbc.Metrics.ApachedruidSysTcpv4EstabResets), - metricApachedruidSysTcpv4InErrs: newMetricApachedruidSysTcpv4InErrs(mbc.Metrics.ApachedruidSysTcpv4InErrs), - metricApachedruidSysTcpv4InSegs: newMetricApachedruidSysTcpv4InSegs(mbc.Metrics.ApachedruidSysTcpv4InSegs), - metricApachedruidSysTcpv4OutRsts: newMetricApachedruidSysTcpv4OutRsts(mbc.Metrics.ApachedruidSysTcpv4OutRsts), - metricApachedruidSysTcpv4OutSegs: newMetricApachedruidSysTcpv4OutSegs(mbc.Metrics.ApachedruidSysTcpv4OutSegs), - metricApachedruidSysTcpv4PassiveOpens: newMetricApachedruidSysTcpv4PassiveOpens(mbc.Metrics.ApachedruidSysTcpv4PassiveOpens), - metricApachedruidSysTcpv4RetransSegs: newMetricApachedruidSysTcpv4RetransSegs(mbc.Metrics.ApachedruidSysTcpv4RetransSegs), - metricApachedruidSysUptime: 
newMetricApachedruidSysUptime(mbc.Metrics.ApachedruidSysUptime), - metricApachedruidTaskActionBatchAttempts: newMetricApachedruidTaskActionBatchAttempts(mbc.Metrics.ApachedruidTaskActionBatchAttempts), - metricApachedruidTaskActionBatchQueueTime: newMetricApachedruidTaskActionBatchQueueTime(mbc.Metrics.ApachedruidTaskActionBatchQueueTime), - metricApachedruidTaskActionBatchRunTime: newMetricApachedruidTaskActionBatchRunTime(mbc.Metrics.ApachedruidTaskActionBatchRunTime), - metricApachedruidTaskActionBatchSize: newMetricApachedruidTaskActionBatchSize(mbc.Metrics.ApachedruidTaskActionBatchSize), - metricApachedruidTaskActionFailedCount: newMetricApachedruidTaskActionFailedCount(mbc.Metrics.ApachedruidTaskActionFailedCount), - metricApachedruidTaskActionLogTime: newMetricApachedruidTaskActionLogTime(mbc.Metrics.ApachedruidTaskActionLogTime), - metricApachedruidTaskActionRunTime: newMetricApachedruidTaskActionRunTime(mbc.Metrics.ApachedruidTaskActionRunTime), - metricApachedruidTaskActionSuccessCount: newMetricApachedruidTaskActionSuccessCount(mbc.Metrics.ApachedruidTaskActionSuccessCount), - metricApachedruidTaskFailedCount: newMetricApachedruidTaskFailedCount(mbc.Metrics.ApachedruidTaskFailedCount), - metricApachedruidTaskPendingCount: newMetricApachedruidTaskPendingCount(mbc.Metrics.ApachedruidTaskPendingCount), - metricApachedruidTaskPendingTime: newMetricApachedruidTaskPendingTime(mbc.Metrics.ApachedruidTaskPendingTime), - metricApachedruidTaskRunTime: newMetricApachedruidTaskRunTime(mbc.Metrics.ApachedruidTaskRunTime), - metricApachedruidTaskRunningCount: newMetricApachedruidTaskRunningCount(mbc.Metrics.ApachedruidTaskRunningCount), - metricApachedruidTaskSegmentAvailabilityWaitTime: newMetricApachedruidTaskSegmentAvailabilityWaitTime(mbc.Metrics.ApachedruidTaskSegmentAvailabilityWaitTime), - metricApachedruidTaskSuccessCount: newMetricApachedruidTaskSuccessCount(mbc.Metrics.ApachedruidTaskSuccessCount), - metricApachedruidTaskWaitingCount: newMetricApachedruidTaskWaitingCount(mbc.Metrics.ApachedruidTaskWaitingCount), - metricApachedruidTaskSlotBlacklistedCount: newMetricApachedruidTaskSlotBlacklistedCount(mbc.Metrics.ApachedruidTaskSlotBlacklistedCount), - metricApachedruidTaskSlotIdleCount: newMetricApachedruidTaskSlotIdleCount(mbc.Metrics.ApachedruidTaskSlotIdleCount), - metricApachedruidTaskSlotLazyCount: newMetricApachedruidTaskSlotLazyCount(mbc.Metrics.ApachedruidTaskSlotLazyCount), - metricApachedruidTaskSlotTotalCount: newMetricApachedruidTaskSlotTotalCount(mbc.Metrics.ApachedruidTaskSlotTotalCount), - metricApachedruidTaskSlotUsedCount: newMetricApachedruidTaskSlotUsedCount(mbc.Metrics.ApachedruidTaskSlotUsedCount), - metricApachedruidTierHistoricalCount: newMetricApachedruidTierHistoricalCount(mbc.Metrics.ApachedruidTierHistoricalCount), - metricApachedruidTierReplicationFactor: newMetricApachedruidTierReplicationFactor(mbc.Metrics.ApachedruidTierReplicationFactor), - metricApachedruidTierRequiredCapacity: newMetricApachedruidTierRequiredCapacity(mbc.Metrics.ApachedruidTierRequiredCapacity), - metricApachedruidTierTotalCapacity: newMetricApachedruidTierTotalCapacity(mbc.Metrics.ApachedruidTierTotalCapacity), - metricApachedruidWorkerTaskFailedCount: newMetricApachedruidWorkerTaskFailedCount(mbc.Metrics.ApachedruidWorkerTaskFailedCount), - metricApachedruidWorkerTaskSuccessCount: newMetricApachedruidWorkerTaskSuccessCount(mbc.Metrics.ApachedruidWorkerTaskSuccessCount), - metricApachedruidWorkerTaskSlotIdleCount: 
newMetricApachedruidWorkerTaskSlotIdleCount(mbc.Metrics.ApachedruidWorkerTaskSlotIdleCount),
-		metricApachedruidWorkerTaskSlotTotalCount: newMetricApachedruidWorkerTaskSlotTotalCount(mbc.Metrics.ApachedruidWorkerTaskSlotTotalCount),
-		metricApachedruidWorkerTaskSlotUsedCount: newMetricApachedruidWorkerTaskSlotUsedCount(mbc.Metrics.ApachedruidWorkerTaskSlotUsedCount),
-		metricApachedruidZkConnected: newMetricApachedruidZkConnected(mbc.Metrics.ApachedruidZkConnected),
-		metricApachedruidZkReconnectTime: newMetricApachedruidZkReconnectTime(mbc.Metrics.ApachedruidZkReconnectTime),
-	}
-	for _, op := range options {
-		op(mb)
-	}
-	return mb
-}
-
-// NewResourceBuilder returns a new resource builder that should be used to build a resource associated with for the emitted metrics.
-func (mb *MetricsBuilder) NewResourceBuilder() *ResourceBuilder {
-	return NewResourceBuilder(mb.config.ResourceAttributes)
-}
-
-// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity.
-func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) {
-	if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() {
-		mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len()
-	}
-}
-
-// ResourceMetricsOption applies changes to provided resource metrics.
-type ResourceMetricsOption func(pmetric.ResourceMetrics)
-
-// WithResource sets the provided resource on the emitted ResourceMetrics.
-// It's recommended to use ResourceBuilder to create the resource.
-func WithResource(res pcommon.Resource) ResourceMetricsOption {
-	return func(rm pmetric.ResourceMetrics) {
-		res.CopyTo(rm.Resource())
-	}
-}
-
-// WithStartTimeOverride overrides start time for all the resource metrics data points.
-// This option should be only used if different start time has to be set on metrics coming from different resources.
-func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption {
-	return func(rm pmetric.ResourceMetrics) {
-		var dps pmetric.NumberDataPointSlice
-		metrics := rm.ScopeMetrics().At(0).Metrics()
-		for i := 0; i < metrics.Len(); i++ {
-			switch metrics.At(i).Type() {
-			case pmetric.MetricTypeGauge:
-				dps = metrics.At(i).Gauge().DataPoints()
-			case pmetric.MetricTypeSum:
-				dps = metrics.At(i).Sum().DataPoints()
-			}
-			for j := 0; j < dps.Len(); j++ {
-				dps.At(j).SetStartTimestamp(start)
-			}
-		}
-	}
-}
-
-// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for
-// recording another set of data points as part of another resource. This function can be helpful when one scraper
-// needs to emit metrics from several resources. Otherwise calling this function is not required,
-// just `Emit` function can be called instead.
-// Resource attributes should be provided as ResourceMetricsOption arguments.
-func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { - rm := pmetric.NewResourceMetrics() - ils := rm.ScopeMetrics().AppendEmpty() - ils.Scope().SetName("github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachedruidreceiver") - ils.Scope().SetVersion(mb.buildInfo.Version) - ils.Metrics().EnsureCapacity(mb.metricsCapacity) - mb.metricApachedruidCompactSegmentAnalyzerFetchAndProcessMillis.emit(ils.Metrics()) - mb.metricApachedruidCompactTaskCount.emit(ils.Metrics()) - mb.metricApachedruidCompactTaskAvailableSlotCount.emit(ils.Metrics()) - mb.metricApachedruidCompactTaskMaxSlotCount.emit(ils.Metrics()) - mb.metricApachedruidCoordinatorGlobalTime.emit(ils.Metrics()) - mb.metricApachedruidCoordinatorTime.emit(ils.Metrics()) - mb.metricApachedruidIngestBytesReceived.emit(ils.Metrics()) - mb.metricApachedruidIngestCount.emit(ils.Metrics()) - mb.metricApachedruidIngestEventsBuffered.emit(ils.Metrics()) - mb.metricApachedruidIngestEventsDuplicate.emit(ils.Metrics()) - mb.metricApachedruidIngestEventsMessageGap.emit(ils.Metrics()) - mb.metricApachedruidIngestEventsProcessed.emit(ils.Metrics()) - mb.metricApachedruidIngestEventsProcessedWithError.emit(ils.Metrics()) - mb.metricApachedruidIngestEventsThrownAway.emit(ils.Metrics()) - mb.metricApachedruidIngestEventsUnparseable.emit(ils.Metrics()) - mb.metricApachedruidIngestHandoffCount.emit(ils.Metrics()) - mb.metricApachedruidIngestHandoffFailed.emit(ils.Metrics()) - mb.metricApachedruidIngestHandoffTime.emit(ils.Metrics()) - mb.metricApachedruidIngestInputBytes.emit(ils.Metrics()) - mb.metricApachedruidIngestKafkaAvgLag.emit(ils.Metrics()) - mb.metricApachedruidIngestKafkaLag.emit(ils.Metrics()) - mb.metricApachedruidIngestKafkaMaxLag.emit(ils.Metrics()) - mb.metricApachedruidIngestKafkaPartitionLag.emit(ils.Metrics()) - mb.metricApachedruidIngestKinesisAvgLagTime.emit(ils.Metrics()) - mb.metricApachedruidIngestKinesisLagTime.emit(ils.Metrics()) - mb.metricApachedruidIngestKinesisMaxLagTime.emit(ils.Metrics()) - mb.metricApachedruidIngestKinesisPartitionLagTime.emit(ils.Metrics()) - mb.metricApachedruidIngestMergeCPU.emit(ils.Metrics()) - mb.metricApachedruidIngestMergeTime.emit(ils.Metrics()) - mb.metricApachedruidIngestNoticesQueueSize.emit(ils.Metrics()) - mb.metricApachedruidIngestNoticesTime.emit(ils.Metrics()) - mb.metricApachedruidIngestPauseTime.emit(ils.Metrics()) - mb.metricApachedruidIngestPersistsBackPressure.emit(ils.Metrics()) - mb.metricApachedruidIngestPersistsCount.emit(ils.Metrics()) - mb.metricApachedruidIngestPersistsCPU.emit(ils.Metrics()) - mb.metricApachedruidIngestPersistsFailed.emit(ils.Metrics()) - mb.metricApachedruidIngestPersistsTime.emit(ils.Metrics()) - mb.metricApachedruidIngestRowsOutput.emit(ils.Metrics()) - mb.metricApachedruidIngestSegmentsCount.emit(ils.Metrics()) - mb.metricApachedruidIngestShuffleBytes.emit(ils.Metrics()) - mb.metricApachedruidIngestShuffleRequests.emit(ils.Metrics()) - mb.metricApachedruidIngestSinkCount.emit(ils.Metrics()) - mb.metricApachedruidIngestTombstonesCount.emit(ils.Metrics()) - mb.metricApachedruidIntervalCompactedCount.emit(ils.Metrics()) - mb.metricApachedruidIntervalSkipCompactCount.emit(ils.Metrics()) - mb.metricApachedruidIntervalWaitCompactCount.emit(ils.Metrics()) - mb.metricApachedruidJettyNumOpenConnections.emit(ils.Metrics()) - mb.metricApachedruidJettyThreadPoolBusy.emit(ils.Metrics()) - mb.metricApachedruidJettyThreadPoolIdle.emit(ils.Metrics()) - mb.metricApachedruidJettyThreadPoolIsLowOnThreads.emit(ils.Metrics()) - 
mb.metricApachedruidJettyThreadPoolMax.emit(ils.Metrics()) - mb.metricApachedruidJettyThreadPoolMin.emit(ils.Metrics()) - mb.metricApachedruidJettyThreadPoolQueueSize.emit(ils.Metrics()) - mb.metricApachedruidJettyThreadPoolTotal.emit(ils.Metrics()) - mb.metricApachedruidJvmBufferpoolCapacity.emit(ils.Metrics()) - mb.metricApachedruidJvmBufferpoolCount.emit(ils.Metrics()) - mb.metricApachedruidJvmBufferpoolUsed.emit(ils.Metrics()) - mb.metricApachedruidJvmGcCount.emit(ils.Metrics()) - mb.metricApachedruidJvmGcCPU.emit(ils.Metrics()) - mb.metricApachedruidJvmMemCommitted.emit(ils.Metrics()) - mb.metricApachedruidJvmMemInit.emit(ils.Metrics()) - mb.metricApachedruidJvmMemMax.emit(ils.Metrics()) - mb.metricApachedruidJvmMemUsed.emit(ils.Metrics()) - mb.metricApachedruidJvmPoolCommitted.emit(ils.Metrics()) - mb.metricApachedruidJvmPoolInit.emit(ils.Metrics()) - mb.metricApachedruidJvmPoolMax.emit(ils.Metrics()) - mb.metricApachedruidJvmPoolUsed.emit(ils.Metrics()) - mb.metricApachedruidKillPendingSegmentsCount.emit(ils.Metrics()) - mb.metricApachedruidKillTaskCount.emit(ils.Metrics()) - mb.metricApachedruidKillTaskAvailableSlotCount.emit(ils.Metrics()) - mb.metricApachedruidKillTaskMaxSlotCount.emit(ils.Metrics()) - mb.metricApachedruidMergeBufferPendingRequests.emit(ils.Metrics()) - mb.metricApachedruidMetadataKillAuditCount.emit(ils.Metrics()) - mb.metricApachedruidMetadataKillCompactionCount.emit(ils.Metrics()) - mb.metricApachedruidMetadataKillDatasourceCount.emit(ils.Metrics()) - mb.metricApachedruidMetadataKillRuleCount.emit(ils.Metrics()) - mb.metricApachedruidMetadataKillSupervisorCount.emit(ils.Metrics()) - mb.metricApachedruidMetadatacacheInitTime.emit(ils.Metrics()) - mb.metricApachedruidMetadatacacheRefreshCount.emit(ils.Metrics()) - mb.metricApachedruidMetadatacacheRefreshTime.emit(ils.Metrics()) - mb.metricApachedruidQueryByteLimitExceededCount.emit(ils.Metrics()) - mb.metricApachedruidQueryBytes.emit(ils.Metrics()) - mb.metricApachedruidQueryCacheDeltaAverageBytes.emit(ils.Metrics()) - mb.metricApachedruidQueryCacheDeltaErrors.emit(ils.Metrics()) - mb.metricApachedruidQueryCacheDeltaEvictions.emit(ils.Metrics()) - mb.metricApachedruidQueryCacheDeltaHitRate.emit(ils.Metrics()) - mb.metricApachedruidQueryCacheDeltaHits.emit(ils.Metrics()) - mb.metricApachedruidQueryCacheDeltaMisses.emit(ils.Metrics()) - mb.metricApachedruidQueryCacheDeltaNumEntries.emit(ils.Metrics()) - mb.metricApachedruidQueryCacheDeltaPutError.emit(ils.Metrics()) - mb.metricApachedruidQueryCacheDeltaPutOk.emit(ils.Metrics()) - mb.metricApachedruidQueryCacheDeltaPutOversized.emit(ils.Metrics()) - mb.metricApachedruidQueryCacheDeltaSizeBytes.emit(ils.Metrics()) - mb.metricApachedruidQueryCacheDeltaTimeouts.emit(ils.Metrics()) - mb.metricApachedruidQueryCacheMemcachedDelta.emit(ils.Metrics()) - mb.metricApachedruidQueryCacheMemcachedTotal.emit(ils.Metrics()) - mb.metricApachedruidQueryCacheTotalAverageBytes.emit(ils.Metrics()) - mb.metricApachedruidQueryCacheTotalErrors.emit(ils.Metrics()) - mb.metricApachedruidQueryCacheTotalEvictions.emit(ils.Metrics()) - mb.metricApachedruidQueryCacheTotalHitRate.emit(ils.Metrics()) - mb.metricApachedruidQueryCacheTotalHits.emit(ils.Metrics()) - mb.metricApachedruidQueryCacheTotalMisses.emit(ils.Metrics()) - mb.metricApachedruidQueryCacheTotalNumEntries.emit(ils.Metrics()) - mb.metricApachedruidQueryCacheTotalPutError.emit(ils.Metrics()) - mb.metricApachedruidQueryCacheTotalPutOk.emit(ils.Metrics()) - mb.metricApachedruidQueryCacheTotalPutOversized.emit(ils.Metrics()) - 
mb.metricApachedruidQueryCacheTotalSizeBytes.emit(ils.Metrics()) - mb.metricApachedruidQueryCacheTotalTimeouts.emit(ils.Metrics()) - mb.metricApachedruidQueryCount.emit(ils.Metrics()) - mb.metricApachedruidQueryCPUTime.emit(ils.Metrics()) - mb.metricApachedruidQueryFailedCount.emit(ils.Metrics()) - mb.metricApachedruidQueryInterruptedCount.emit(ils.Metrics()) - mb.metricApachedruidQueryNodeBackpressure.emit(ils.Metrics()) - mb.metricApachedruidQueryNodeBytes.emit(ils.Metrics()) - mb.metricApachedruidQueryNodeTime.emit(ils.Metrics()) - mb.metricApachedruidQueryNodeTtfb.emit(ils.Metrics()) - mb.metricApachedruidQueryPriority.emit(ils.Metrics()) - mb.metricApachedruidQueryRowLimitExceededCount.emit(ils.Metrics()) - mb.metricApachedruidQuerySegmentTime.emit(ils.Metrics()) - mb.metricApachedruidQuerySegmentAndCacheTime.emit(ils.Metrics()) - mb.metricApachedruidQuerySegmentsCount.emit(ils.Metrics()) - mb.metricApachedruidQuerySuccessCount.emit(ils.Metrics()) - mb.metricApachedruidQueryTime.emit(ils.Metrics()) - mb.metricApachedruidQueryTimeoutCount.emit(ils.Metrics()) - mb.metricApachedruidQueryWaitTime.emit(ils.Metrics()) - mb.metricApachedruidSegmentAddedBytes.emit(ils.Metrics()) - mb.metricApachedruidSegmentAssignSkippedCount.emit(ils.Metrics()) - mb.metricApachedruidSegmentAssignedCount.emit(ils.Metrics()) - mb.metricApachedruidSegmentCompactedBytes.emit(ils.Metrics()) - mb.metricApachedruidSegmentCompactedCount.emit(ils.Metrics()) - mb.metricApachedruidSegmentCount.emit(ils.Metrics()) - mb.metricApachedruidSegmentDeletedCount.emit(ils.Metrics()) - mb.metricApachedruidSegmentDropQueueCount.emit(ils.Metrics()) - mb.metricApachedruidSegmentDropSkippedCount.emit(ils.Metrics()) - mb.metricApachedruidSegmentDroppedCount.emit(ils.Metrics()) - mb.metricApachedruidSegmentLoadQueueAssigned.emit(ils.Metrics()) - mb.metricApachedruidSegmentLoadQueueCancelled.emit(ils.Metrics()) - mb.metricApachedruidSegmentLoadQueueCount.emit(ils.Metrics()) - mb.metricApachedruidSegmentLoadQueueFailed.emit(ils.Metrics()) - mb.metricApachedruidSegmentLoadQueueSize.emit(ils.Metrics()) - mb.metricApachedruidSegmentLoadQueueSuccess.emit(ils.Metrics()) - mb.metricApachedruidSegmentMax.emit(ils.Metrics()) - mb.metricApachedruidSegmentMoveSkippedCount.emit(ils.Metrics()) - mb.metricApachedruidSegmentMovedBytes.emit(ils.Metrics()) - mb.metricApachedruidSegmentMovedCount.emit(ils.Metrics()) - mb.metricApachedruidSegmentNukedBytes.emit(ils.Metrics()) - mb.metricApachedruidSegmentOverShadowedCount.emit(ils.Metrics()) - mb.metricApachedruidSegmentPendingDelete.emit(ils.Metrics()) - mb.metricApachedruidSegmentRowCountAvg.emit(ils.Metrics()) - mb.metricApachedruidSegmentRowCountRangeCount.emit(ils.Metrics()) - mb.metricApachedruidSegmentScanActive.emit(ils.Metrics()) - mb.metricApachedruidSegmentScanPending.emit(ils.Metrics()) - mb.metricApachedruidSegmentSize.emit(ils.Metrics()) - mb.metricApachedruidSegmentSkipCompactBytes.emit(ils.Metrics()) - mb.metricApachedruidSegmentSkipCompactCount.emit(ils.Metrics()) - mb.metricApachedruidSegmentUnavailableCount.emit(ils.Metrics()) - mb.metricApachedruidSegmentUnderReplicatedCount.emit(ils.Metrics()) - mb.metricApachedruidSegmentUnneededCount.emit(ils.Metrics()) - mb.metricApachedruidSegmentUsed.emit(ils.Metrics()) - mb.metricApachedruidSegmentUsedPercent.emit(ils.Metrics()) - mb.metricApachedruidSegmentWaitCompactBytes.emit(ils.Metrics()) - mb.metricApachedruidSegmentWaitCompactCount.emit(ils.Metrics()) - mb.metricApachedruidServerviewInitTime.emit(ils.Metrics()) - 
mb.metricApachedruidServerviewSyncHealthy.emit(ils.Metrics()) - mb.metricApachedruidServerviewSyncUnstableTime.emit(ils.Metrics()) - mb.metricApachedruidSQLQueryBytes.emit(ils.Metrics()) - mb.metricApachedruidSQLQueryPlanningTimeMs.emit(ils.Metrics()) - mb.metricApachedruidSQLQueryTime.emit(ils.Metrics()) - mb.metricApachedruidSubqueryByteLimitCount.emit(ils.Metrics()) - mb.metricApachedruidSubqueryFallbackCount.emit(ils.Metrics()) - mb.metricApachedruidSubqueryFallbackInsufficientTypeCount.emit(ils.Metrics()) - mb.metricApachedruidSubqueryFallbackUnknownReasonCount.emit(ils.Metrics()) - mb.metricApachedruidSubqueryRowLimitCount.emit(ils.Metrics()) - mb.metricApachedruidSysCPU.emit(ils.Metrics()) - mb.metricApachedruidSysDiskQueue.emit(ils.Metrics()) - mb.metricApachedruidSysDiskReadCount.emit(ils.Metrics()) - mb.metricApachedruidSysDiskReadSize.emit(ils.Metrics()) - mb.metricApachedruidSysDiskTransferTime.emit(ils.Metrics()) - mb.metricApachedruidSysDiskWriteCount.emit(ils.Metrics()) - mb.metricApachedruidSysDiskWriteSize.emit(ils.Metrics()) - mb.metricApachedruidSysFsFilesCount.emit(ils.Metrics()) - mb.metricApachedruidSysFsFilesFree.emit(ils.Metrics()) - mb.metricApachedruidSysFsMax.emit(ils.Metrics()) - mb.metricApachedruidSysFsUsed.emit(ils.Metrics()) - mb.metricApachedruidSysLa1.emit(ils.Metrics()) - mb.metricApachedruidSysLa15.emit(ils.Metrics()) - mb.metricApachedruidSysLa5.emit(ils.Metrics()) - mb.metricApachedruidSysMemFree.emit(ils.Metrics()) - mb.metricApachedruidSysMemMax.emit(ils.Metrics()) - mb.metricApachedruidSysMemUsed.emit(ils.Metrics()) - mb.metricApachedruidSysNetReadDropped.emit(ils.Metrics()) - mb.metricApachedruidSysNetReadErrors.emit(ils.Metrics()) - mb.metricApachedruidSysNetReadPackets.emit(ils.Metrics()) - mb.metricApachedruidSysNetReadSize.emit(ils.Metrics()) - mb.metricApachedruidSysNetWriteCollisions.emit(ils.Metrics()) - mb.metricApachedruidSysNetWriteErrors.emit(ils.Metrics()) - mb.metricApachedruidSysNetWritePackets.emit(ils.Metrics()) - mb.metricApachedruidSysNetWriteSize.emit(ils.Metrics()) - mb.metricApachedruidSysStorageUsed.emit(ils.Metrics()) - mb.metricApachedruidSysSwapFree.emit(ils.Metrics()) - mb.metricApachedruidSysSwapMax.emit(ils.Metrics()) - mb.metricApachedruidSysSwapPageIn.emit(ils.Metrics()) - mb.metricApachedruidSysSwapPageOut.emit(ils.Metrics()) - mb.metricApachedruidSysTcpv4ActiveOpens.emit(ils.Metrics()) - mb.metricApachedruidSysTcpv4AttemptFails.emit(ils.Metrics()) - mb.metricApachedruidSysTcpv4EstabResets.emit(ils.Metrics()) - mb.metricApachedruidSysTcpv4InErrs.emit(ils.Metrics()) - mb.metricApachedruidSysTcpv4InSegs.emit(ils.Metrics()) - mb.metricApachedruidSysTcpv4OutRsts.emit(ils.Metrics()) - mb.metricApachedruidSysTcpv4OutSegs.emit(ils.Metrics()) - mb.metricApachedruidSysTcpv4PassiveOpens.emit(ils.Metrics()) - mb.metricApachedruidSysTcpv4RetransSegs.emit(ils.Metrics()) - mb.metricApachedruidSysUptime.emit(ils.Metrics()) - mb.metricApachedruidTaskActionBatchAttempts.emit(ils.Metrics()) - mb.metricApachedruidTaskActionBatchQueueTime.emit(ils.Metrics()) - mb.metricApachedruidTaskActionBatchRunTime.emit(ils.Metrics()) - mb.metricApachedruidTaskActionBatchSize.emit(ils.Metrics()) - mb.metricApachedruidTaskActionFailedCount.emit(ils.Metrics()) - mb.metricApachedruidTaskActionLogTime.emit(ils.Metrics()) - mb.metricApachedruidTaskActionRunTime.emit(ils.Metrics()) - mb.metricApachedruidTaskActionSuccessCount.emit(ils.Metrics()) - mb.metricApachedruidTaskFailedCount.emit(ils.Metrics()) - 
mb.metricApachedruidTaskPendingCount.emit(ils.Metrics())
-	mb.metricApachedruidTaskPendingTime.emit(ils.Metrics())
-	mb.metricApachedruidTaskRunTime.emit(ils.Metrics())
-	mb.metricApachedruidTaskRunningCount.emit(ils.Metrics())
-	mb.metricApachedruidTaskSegmentAvailabilityWaitTime.emit(ils.Metrics())
-	mb.metricApachedruidTaskSuccessCount.emit(ils.Metrics())
-	mb.metricApachedruidTaskWaitingCount.emit(ils.Metrics())
-	mb.metricApachedruidTaskSlotBlacklistedCount.emit(ils.Metrics())
-	mb.metricApachedruidTaskSlotIdleCount.emit(ils.Metrics())
-	mb.metricApachedruidTaskSlotLazyCount.emit(ils.Metrics())
-	mb.metricApachedruidTaskSlotTotalCount.emit(ils.Metrics())
-	mb.metricApachedruidTaskSlotUsedCount.emit(ils.Metrics())
-	mb.metricApachedruidTierHistoricalCount.emit(ils.Metrics())
-	mb.metricApachedruidTierReplicationFactor.emit(ils.Metrics())
-	mb.metricApachedruidTierRequiredCapacity.emit(ils.Metrics())
-	mb.metricApachedruidTierTotalCapacity.emit(ils.Metrics())
-	mb.metricApachedruidWorkerTaskFailedCount.emit(ils.Metrics())
-	mb.metricApachedruidWorkerTaskSuccessCount.emit(ils.Metrics())
-	mb.metricApachedruidWorkerTaskSlotIdleCount.emit(ils.Metrics())
-	mb.metricApachedruidWorkerTaskSlotTotalCount.emit(ils.Metrics())
-	mb.metricApachedruidWorkerTaskSlotUsedCount.emit(ils.Metrics())
-	mb.metricApachedruidZkConnected.emit(ils.Metrics())
-	mb.metricApachedruidZkReconnectTime.emit(ils.Metrics())
-
-	for _, op := range rmo {
-		op(rm)
-	}
-	if ils.Metrics().Len() > 0 {
-		mb.updateCapacity(rm)
-		rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty())
-	}
-}
-
-// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for
-// recording another set of metrics. This function will be responsible for applying all the transformations required to
-// produce metric representation defined in metadata and user config, e.g. delta or cumulative.
-func (mb *MetricsBuilder) Emit(rmo ...ResourceMetricsOption) pmetric.Metrics {
-	mb.EmitForResource(rmo...)
-	metrics := mb.metricsBuffer
-	mb.metricsBuffer = pmetric.NewMetrics()
-	return metrics
-}
-
-// RecordApachedruidCompactSegmentAnalyzerFetchAndProcessMillisDataPoint adds a data point to apachedruid.compact.segment_analyzer.fetch_and_process_millis metric.
-func (mb *MetricsBuilder) RecordApachedruidCompactSegmentAnalyzerFetchAndProcessMillisDataPoint(ts pcommon.Timestamp, val int64, compactTaskTypeAttributeValue string, compactDataSourceAttributeValue string, compactGroupIDAttributeValue string, compactTagsAttributeValue string, compactTaskIDAttributeValue string) {
-	mb.metricApachedruidCompactSegmentAnalyzerFetchAndProcessMillis.recordDataPoint(mb.startTime, ts, val, compactTaskTypeAttributeValue, compactDataSourceAttributeValue, compactGroupIDAttributeValue, compactTagsAttributeValue, compactTaskIDAttributeValue)
-}
-
-// RecordApachedruidCompactTaskCountDataPoint adds a data point to apachedruid.compact.task.count metric.
-func (mb *MetricsBuilder) RecordApachedruidCompactTaskCountDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidCompactTaskCount.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidCompactTaskAvailableSlotCountDataPoint adds a data point to apachedruid.compact_task.available_slot.count metric.
-func (mb *MetricsBuilder) RecordApachedruidCompactTaskAvailableSlotCountDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricApachedruidCompactTaskAvailableSlotCount.recordDataPoint(mb.startTime, ts, val) -} - -// RecordApachedruidCompactTaskMaxSlotCountDataPoint adds a data point to apachedruid.compact_task.max_slot.count metric. -func (mb *MetricsBuilder) RecordApachedruidCompactTaskMaxSlotCountDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricApachedruidCompactTaskMaxSlotCount.recordDataPoint(mb.startTime, ts, val) -} - -// RecordApachedruidCoordinatorGlobalTimeDataPoint adds a data point to apachedruid.coordinator.global.time metric. -func (mb *MetricsBuilder) RecordApachedruidCoordinatorGlobalTimeDataPoint(ts pcommon.Timestamp, val int64, coordinatorDutyGroupAttributeValue string) { - mb.metricApachedruidCoordinatorGlobalTime.recordDataPoint(mb.startTime, ts, val, coordinatorDutyGroupAttributeValue) -} - -// RecordApachedruidCoordinatorTimeDataPoint adds a data point to apachedruid.coordinator.time metric. -func (mb *MetricsBuilder) RecordApachedruidCoordinatorTimeDataPoint(ts pcommon.Timestamp, val int64, coordinatorDutyAttributeValue string) { - mb.metricApachedruidCoordinatorTime.recordDataPoint(mb.startTime, ts, val, coordinatorDutyAttributeValue) -} - -// RecordApachedruidIngestBytesReceivedDataPoint adds a data point to apachedruid.ingest.bytes.received metric. -func (mb *MetricsBuilder) RecordApachedruidIngestBytesReceivedDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestTaskIDAttributeValue string, ingestDataSourceAttributeValue string, ingestServiceNameAttributeValue string) { - mb.metricApachedruidIngestBytesReceived.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestTaskIDAttributeValue, ingestDataSourceAttributeValue, ingestServiceNameAttributeValue) -} - -// RecordApachedruidIngestCountDataPoint adds a data point to apachedruid.ingest.count metric. -func (mb *MetricsBuilder) RecordApachedruidIngestCountDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string, ingestTaskIngestionModeAttributeValue string) { - mb.metricApachedruidIngestCount.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue, ingestTaskIngestionModeAttributeValue) -} - -// RecordApachedruidIngestEventsBufferedDataPoint adds a data point to apachedruid.ingest.events.buffered metric. -func (mb *MetricsBuilder) RecordApachedruidIngestEventsBufferedDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestServiceNameAttributeValue string, ingestBufferCapacityAttributeValue string, ingestTaskIDAttributeValue string) { - mb.metricApachedruidIngestEventsBuffered.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestServiceNameAttributeValue, ingestBufferCapacityAttributeValue, ingestTaskIDAttributeValue) -} - -// RecordApachedruidIngestEventsDuplicateDataPoint adds a data point to apachedruid.ingest.events.duplicate metric. 
-func (mb *MetricsBuilder) RecordApachedruidIngestEventsDuplicateDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - mb.metricApachedruidIngestEventsDuplicate.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) -} - -// RecordApachedruidIngestEventsMessageGapDataPoint adds a data point to apachedruid.ingest.events.message_gap metric. -func (mb *MetricsBuilder) RecordApachedruidIngestEventsMessageGapDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - mb.metricApachedruidIngestEventsMessageGap.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) -} - -// RecordApachedruidIngestEventsProcessedDataPoint adds a data point to apachedruid.ingest.events.processed metric. -func (mb *MetricsBuilder) RecordApachedruidIngestEventsProcessedDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - mb.metricApachedruidIngestEventsProcessed.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) -} - -// RecordApachedruidIngestEventsProcessedWithErrorDataPoint adds a data point to apachedruid.ingest.events.processed_with_error metric. -func (mb *MetricsBuilder) RecordApachedruidIngestEventsProcessedWithErrorDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - mb.metricApachedruidIngestEventsProcessedWithError.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) -} - -// RecordApachedruidIngestEventsThrownAwayDataPoint adds a data point to apachedruid.ingest.events.thrown_away metric. -func (mb *MetricsBuilder) RecordApachedruidIngestEventsThrownAwayDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - mb.metricApachedruidIngestEventsThrownAway.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) -} - -// RecordApachedruidIngestEventsUnparseableDataPoint adds a data point to apachedruid.ingest.events.unparseable metric. 
-func (mb *MetricsBuilder) RecordApachedruidIngestEventsUnparseableDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - mb.metricApachedruidIngestEventsUnparseable.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) -} - -// RecordApachedruidIngestHandoffCountDataPoint adds a data point to apachedruid.ingest.handoff.count metric. -func (mb *MetricsBuilder) RecordApachedruidIngestHandoffCountDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - mb.metricApachedruidIngestHandoffCount.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) -} - -// RecordApachedruidIngestHandoffFailedDataPoint adds a data point to apachedruid.ingest.handoff.failed metric. -func (mb *MetricsBuilder) RecordApachedruidIngestHandoffFailedDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - mb.metricApachedruidIngestHandoffFailed.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) -} - -// RecordApachedruidIngestHandoffTimeDataPoint adds a data point to apachedruid.ingest.handoff.time metric. -func (mb *MetricsBuilder) RecordApachedruidIngestHandoffTimeDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - mb.metricApachedruidIngestHandoffTime.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) -} - -// RecordApachedruidIngestInputBytesDataPoint adds a data point to apachedruid.ingest.input.bytes metric. -func (mb *MetricsBuilder) RecordApachedruidIngestInputBytesDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - mb.metricApachedruidIngestInputBytes.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) -} - -// RecordApachedruidIngestKafkaAvgLagDataPoint adds a data point to apachedruid.ingest.kafka.avg_lag metric. 
-func (mb *MetricsBuilder) RecordApachedruidIngestKafkaAvgLagDataPoint(ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestStreamAttributeValue string, ingestDataSourceAttributeValue string) { - mb.metricApachedruidIngestKafkaAvgLag.recordDataPoint(mb.startTime, ts, val, ingestTagsAttributeValue, ingestStreamAttributeValue, ingestDataSourceAttributeValue) -} - -// RecordApachedruidIngestKafkaLagDataPoint adds a data point to apachedruid.ingest.kafka.lag metric. -func (mb *MetricsBuilder) RecordApachedruidIngestKafkaLagDataPoint(ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestStreamAttributeValue string, ingestDataSourceAttributeValue string) { - mb.metricApachedruidIngestKafkaLag.recordDataPoint(mb.startTime, ts, val, ingestTagsAttributeValue, ingestStreamAttributeValue, ingestDataSourceAttributeValue) -} - -// RecordApachedruidIngestKafkaMaxLagDataPoint adds a data point to apachedruid.ingest.kafka.max_lag metric. -func (mb *MetricsBuilder) RecordApachedruidIngestKafkaMaxLagDataPoint(ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestStreamAttributeValue string, ingestDataSourceAttributeValue string) { - mb.metricApachedruidIngestKafkaMaxLag.recordDataPoint(mb.startTime, ts, val, ingestTagsAttributeValue, ingestStreamAttributeValue, ingestDataSourceAttributeValue) -} - -// RecordApachedruidIngestKafkaPartitionLagDataPoint adds a data point to apachedruid.ingest.kafka.partition_lag metric. -func (mb *MetricsBuilder) RecordApachedruidIngestKafkaPartitionLagDataPoint(ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestPartitionAttributeValue string, ingestStreamAttributeValue string, ingestDataSourceAttributeValue string) { - mb.metricApachedruidIngestKafkaPartitionLag.recordDataPoint(mb.startTime, ts, val, ingestTagsAttributeValue, ingestPartitionAttributeValue, ingestStreamAttributeValue, ingestDataSourceAttributeValue) -} - -// RecordApachedruidIngestKinesisAvgLagTimeDataPoint adds a data point to apachedruid.ingest.kinesis.avg_lag.time metric. -func (mb *MetricsBuilder) RecordApachedruidIngestKinesisAvgLagTimeDataPoint(ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestStreamAttributeValue string, ingestDataSourceAttributeValue string) { - mb.metricApachedruidIngestKinesisAvgLagTime.recordDataPoint(mb.startTime, ts, val, ingestTagsAttributeValue, ingestStreamAttributeValue, ingestDataSourceAttributeValue) -} - -// RecordApachedruidIngestKinesisLagTimeDataPoint adds a data point to apachedruid.ingest.kinesis.lag.time metric. -func (mb *MetricsBuilder) RecordApachedruidIngestKinesisLagTimeDataPoint(ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestStreamAttributeValue string, ingestDataSourceAttributeValue string) { - mb.metricApachedruidIngestKinesisLagTime.recordDataPoint(mb.startTime, ts, val, ingestTagsAttributeValue, ingestStreamAttributeValue, ingestDataSourceAttributeValue) -} - -// RecordApachedruidIngestKinesisMaxLagTimeDataPoint adds a data point to apachedruid.ingest.kinesis.max_lag.time metric. 
-func (mb *MetricsBuilder) RecordApachedruidIngestKinesisMaxLagTimeDataPoint(ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestStreamAttributeValue string, ingestDataSourceAttributeValue string) { - mb.metricApachedruidIngestKinesisMaxLagTime.recordDataPoint(mb.startTime, ts, val, ingestTagsAttributeValue, ingestStreamAttributeValue, ingestDataSourceAttributeValue) -} - -// RecordApachedruidIngestKinesisPartitionLagTimeDataPoint adds a data point to apachedruid.ingest.kinesis.partition_lag.time metric. -func (mb *MetricsBuilder) RecordApachedruidIngestKinesisPartitionLagTimeDataPoint(ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestPartitionAttributeValue string, ingestStreamAttributeValue string, ingestDataSourceAttributeValue string) { - mb.metricApachedruidIngestKinesisPartitionLagTime.recordDataPoint(mb.startTime, ts, val, ingestTagsAttributeValue, ingestPartitionAttributeValue, ingestStreamAttributeValue, ingestDataSourceAttributeValue) -} - -// RecordApachedruidIngestMergeCPUDataPoint adds a data point to apachedruid.ingest.merge.cpu metric. -func (mb *MetricsBuilder) RecordApachedruidIngestMergeCPUDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - mb.metricApachedruidIngestMergeCPU.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) -} - -// RecordApachedruidIngestMergeTimeDataPoint adds a data point to apachedruid.ingest.merge.time metric. -func (mb *MetricsBuilder) RecordApachedruidIngestMergeTimeDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - mb.metricApachedruidIngestMergeTime.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) -} - -// RecordApachedruidIngestNoticesQueueSizeDataPoint adds a data point to apachedruid.ingest.notices.queue_size metric. -func (mb *MetricsBuilder) RecordApachedruidIngestNoticesQueueSizeDataPoint(ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestDataSourceAttributeValue string) { - mb.metricApachedruidIngestNoticesQueueSize.recordDataPoint(mb.startTime, ts, val, ingestTagsAttributeValue, ingestDataSourceAttributeValue) -} - -// RecordApachedruidIngestNoticesTimeDataPoint adds a data point to apachedruid.ingest.notices.time metric. -func (mb *MetricsBuilder) RecordApachedruidIngestNoticesTimeDataPoint(ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestDataSourceAttributeValue string) { - mb.metricApachedruidIngestNoticesTime.recordDataPoint(mb.startTime, ts, val, ingestTagsAttributeValue, ingestDataSourceAttributeValue) -} - -// RecordApachedruidIngestPauseTimeDataPoint adds a data point to apachedruid.ingest.pause.time metric. 
-func (mb *MetricsBuilder) RecordApachedruidIngestPauseTimeDataPoint(ts pcommon.Timestamp, val int64, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string, ingestDataSourceAttributeValue string) { - mb.metricApachedruidIngestPauseTime.recordDataPoint(mb.startTime, ts, val, ingestTagsAttributeValue, ingestTaskIDAttributeValue, ingestDataSourceAttributeValue) -} - -// RecordApachedruidIngestPersistsBackPressureDataPoint adds a data point to apachedruid.ingest.persists.back_pressure metric. -func (mb *MetricsBuilder) RecordApachedruidIngestPersistsBackPressureDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - mb.metricApachedruidIngestPersistsBackPressure.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) -} - -// RecordApachedruidIngestPersistsCountDataPoint adds a data point to apachedruid.ingest.persists.count metric. -func (mb *MetricsBuilder) RecordApachedruidIngestPersistsCountDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - mb.metricApachedruidIngestPersistsCount.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) -} - -// RecordApachedruidIngestPersistsCPUDataPoint adds a data point to apachedruid.ingest.persists.cpu metric. -func (mb *MetricsBuilder) RecordApachedruidIngestPersistsCPUDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - mb.metricApachedruidIngestPersistsCPU.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) -} - -// RecordApachedruidIngestPersistsFailedDataPoint adds a data point to apachedruid.ingest.persists.failed metric. -func (mb *MetricsBuilder) RecordApachedruidIngestPersistsFailedDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - mb.metricApachedruidIngestPersistsFailed.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) -} - -// RecordApachedruidIngestPersistsTimeDataPoint adds a data point to apachedruid.ingest.persists.time metric. 
-func (mb *MetricsBuilder) RecordApachedruidIngestPersistsTimeDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - mb.metricApachedruidIngestPersistsTime.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) -} - -// RecordApachedruidIngestRowsOutputDataPoint adds a data point to apachedruid.ingest.rows.output metric. -func (mb *MetricsBuilder) RecordApachedruidIngestRowsOutputDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestTaskIDAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string) { - mb.metricApachedruidIngestRowsOutput.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestTaskIDAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue) -} - -// RecordApachedruidIngestSegmentsCountDataPoint adds a data point to apachedruid.ingest.segments.count metric. -func (mb *MetricsBuilder) RecordApachedruidIngestSegmentsCountDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string, ingestTaskIngestionModeAttributeValue string) { - mb.metricApachedruidIngestSegmentsCount.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue, ingestTaskIngestionModeAttributeValue) -} - -// RecordApachedruidIngestShuffleBytesDataPoint adds a data point to apachedruid.ingest.shuffle.bytes metric. -func (mb *MetricsBuilder) RecordApachedruidIngestShuffleBytesDataPoint(ts pcommon.Timestamp, val int64, ingestSupervisorTaskIDAttributeValue string) { - mb.metricApachedruidIngestShuffleBytes.recordDataPoint(mb.startTime, ts, val, ingestSupervisorTaskIDAttributeValue) -} - -// RecordApachedruidIngestShuffleRequestsDataPoint adds a data point to apachedruid.ingest.shuffle.requests metric. -func (mb *MetricsBuilder) RecordApachedruidIngestShuffleRequestsDataPoint(ts pcommon.Timestamp, val int64, ingestSupervisorTaskIDAttributeValue string) { - mb.metricApachedruidIngestShuffleRequests.recordDataPoint(mb.startTime, ts, val, ingestSupervisorTaskIDAttributeValue) -} - -// RecordApachedruidIngestSinkCountDataPoint adds a data point to apachedruid.ingest.sink.count metric. -func (mb *MetricsBuilder) RecordApachedruidIngestSinkCountDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string) { - mb.metricApachedruidIngestSinkCount.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue) -} - -// RecordApachedruidIngestTombstonesCountDataPoint adds a data point to apachedruid.ingest.tombstones.count metric. 
-func (mb *MetricsBuilder) RecordApachedruidIngestTombstonesCountDataPoint(ts pcommon.Timestamp, val int64, ingestTaskTypeAttributeValue string, ingestDataSourceAttributeValue string, ingestGroupIDAttributeValue string, ingestTagsAttributeValue string, ingestTaskIDAttributeValue string, ingestTaskIngestionModeAttributeValue string) { - mb.metricApachedruidIngestTombstonesCount.recordDataPoint(mb.startTime, ts, val, ingestTaskTypeAttributeValue, ingestDataSourceAttributeValue, ingestGroupIDAttributeValue, ingestTagsAttributeValue, ingestTaskIDAttributeValue, ingestTaskIngestionModeAttributeValue) -} - -// RecordApachedruidIntervalCompactedCountDataPoint adds a data point to apachedruid.interval.compacted.count metric. -func (mb *MetricsBuilder) RecordApachedruidIntervalCompactedCountDataPoint(ts pcommon.Timestamp, val int64, intervalDataSourceAttributeValue string) { - mb.metricApachedruidIntervalCompactedCount.recordDataPoint(mb.startTime, ts, val, intervalDataSourceAttributeValue) -} - -// RecordApachedruidIntervalSkipCompactCountDataPoint adds a data point to apachedruid.interval.skip_compact.count metric. -func (mb *MetricsBuilder) RecordApachedruidIntervalSkipCompactCountDataPoint(ts pcommon.Timestamp, val int64, intervalDataSourceAttributeValue string) { - mb.metricApachedruidIntervalSkipCompactCount.recordDataPoint(mb.startTime, ts, val, intervalDataSourceAttributeValue) -} - -// RecordApachedruidIntervalWaitCompactCountDataPoint adds a data point to apachedruid.interval.wait_compact.count metric. -func (mb *MetricsBuilder) RecordApachedruidIntervalWaitCompactCountDataPoint(ts pcommon.Timestamp, val int64, intervalDataSourceAttributeValue string) { - mb.metricApachedruidIntervalWaitCompactCount.recordDataPoint(mb.startTime, ts, val, intervalDataSourceAttributeValue) -} - -// RecordApachedruidJettyNumOpenConnectionsDataPoint adds a data point to apachedruid.jetty.num_open_connections metric. -func (mb *MetricsBuilder) RecordApachedruidJettyNumOpenConnectionsDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricApachedruidJettyNumOpenConnections.recordDataPoint(mb.startTime, ts, val) -} - -// RecordApachedruidJettyThreadPoolBusyDataPoint adds a data point to apachedruid.jetty.thread_pool.busy metric. -func (mb *MetricsBuilder) RecordApachedruidJettyThreadPoolBusyDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricApachedruidJettyThreadPoolBusy.recordDataPoint(mb.startTime, ts, val) -} - -// RecordApachedruidJettyThreadPoolIdleDataPoint adds a data point to apachedruid.jetty.thread_pool.idle metric. -func (mb *MetricsBuilder) RecordApachedruidJettyThreadPoolIdleDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricApachedruidJettyThreadPoolIdle.recordDataPoint(mb.startTime, ts, val) -} - -// RecordApachedruidJettyThreadPoolIsLowOnThreadsDataPoint adds a data point to apachedruid.jetty.thread_pool.is_low_on_threads metric. -func (mb *MetricsBuilder) RecordApachedruidJettyThreadPoolIsLowOnThreadsDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricApachedruidJettyThreadPoolIsLowOnThreads.recordDataPoint(mb.startTime, ts, val) -} - -// RecordApachedruidJettyThreadPoolMaxDataPoint adds a data point to apachedruid.jetty.thread_pool.max metric. -func (mb *MetricsBuilder) RecordApachedruidJettyThreadPoolMaxDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricApachedruidJettyThreadPoolMax.recordDataPoint(mb.startTime, ts, val) -} - -// RecordApachedruidJettyThreadPoolMinDataPoint adds a data point to apachedruid.jetty.thread_pool.min metric. 
-func (mb *MetricsBuilder) RecordApachedruidJettyThreadPoolMinDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidJettyThreadPoolMin.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidJettyThreadPoolQueueSizeDataPoint adds a data point to apachedruid.jetty.thread_pool.queue_size metric.
-func (mb *MetricsBuilder) RecordApachedruidJettyThreadPoolQueueSizeDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidJettyThreadPoolQueueSize.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidJettyThreadPoolTotalDataPoint adds a data point to apachedruid.jetty.thread_pool.total metric.
-func (mb *MetricsBuilder) RecordApachedruidJettyThreadPoolTotalDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidJettyThreadPoolTotal.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidJvmBufferpoolCapacityDataPoint adds a data point to apachedruid.jvm.bufferpool.capacity metric.
-func (mb *MetricsBuilder) RecordApachedruidJvmBufferpoolCapacityDataPoint(ts pcommon.Timestamp, val int64, jvmBufferpoolNameAttributeValue string) {
-	mb.metricApachedruidJvmBufferpoolCapacity.recordDataPoint(mb.startTime, ts, val, jvmBufferpoolNameAttributeValue)
-}
-
-// RecordApachedruidJvmBufferpoolCountDataPoint adds a data point to apachedruid.jvm.bufferpool.count metric.
-func (mb *MetricsBuilder) RecordApachedruidJvmBufferpoolCountDataPoint(ts pcommon.Timestamp, val int64, jvmBufferpoolNameAttributeValue string) {
-	mb.metricApachedruidJvmBufferpoolCount.recordDataPoint(mb.startTime, ts, val, jvmBufferpoolNameAttributeValue)
-}
-
-// RecordApachedruidJvmBufferpoolUsedDataPoint adds a data point to apachedruid.jvm.bufferpool.used metric.
-func (mb *MetricsBuilder) RecordApachedruidJvmBufferpoolUsedDataPoint(ts pcommon.Timestamp, val int64, jvmBufferpoolNameAttributeValue string) {
-	mb.metricApachedruidJvmBufferpoolUsed.recordDataPoint(mb.startTime, ts, val, jvmBufferpoolNameAttributeValue)
-}
-
-// RecordApachedruidJvmGcCountDataPoint adds a data point to apachedruid.jvm.gc.count metric.
-func (mb *MetricsBuilder) RecordApachedruidJvmGcCountDataPoint(ts pcommon.Timestamp, val int64, jvmGcGenAttributeValue string, jvmGcNameAttributeValue string) {
-	mb.metricApachedruidJvmGcCount.recordDataPoint(mb.startTime, ts, val, jvmGcGenAttributeValue, jvmGcNameAttributeValue)
-}
-
-// RecordApachedruidJvmGcCPUDataPoint adds a data point to apachedruid.jvm.gc.cpu metric.
-func (mb *MetricsBuilder) RecordApachedruidJvmGcCPUDataPoint(ts pcommon.Timestamp, val int64, jvmGcGenAttributeValue string, jvmGcNameAttributeValue string) {
-	mb.metricApachedruidJvmGcCPU.recordDataPoint(mb.startTime, ts, val, jvmGcGenAttributeValue, jvmGcNameAttributeValue)
-}
-
-// RecordApachedruidJvmMemCommittedDataPoint adds a data point to apachedruid.jvm.mem.committed metric.
-func (mb *MetricsBuilder) RecordApachedruidJvmMemCommittedDataPoint(ts pcommon.Timestamp, val int64, jvmMemKindAttributeValue string) {
-	mb.metricApachedruidJvmMemCommitted.recordDataPoint(mb.startTime, ts, val, jvmMemKindAttributeValue)
-}
-
-// RecordApachedruidJvmMemInitDataPoint adds a data point to apachedruid.jvm.mem.init metric.
-func (mb *MetricsBuilder) RecordApachedruidJvmMemInitDataPoint(ts pcommon.Timestamp, val int64, jvmMemKindAttributeValue string) {
-	mb.metricApachedruidJvmMemInit.recordDataPoint(mb.startTime, ts, val, jvmMemKindAttributeValue)
-}
-
-// RecordApachedruidJvmMemMaxDataPoint adds a data point to apachedruid.jvm.mem.max metric.
-func (mb *MetricsBuilder) RecordApachedruidJvmMemMaxDataPoint(ts pcommon.Timestamp, val int64, jvmMemKindAttributeValue string) {
-	mb.metricApachedruidJvmMemMax.recordDataPoint(mb.startTime, ts, val, jvmMemKindAttributeValue)
-}
-
-// RecordApachedruidJvmMemUsedDataPoint adds a data point to apachedruid.jvm.mem.used metric.
-func (mb *MetricsBuilder) RecordApachedruidJvmMemUsedDataPoint(ts pcommon.Timestamp, val int64, jvmMemKindAttributeValue string) {
-	mb.metricApachedruidJvmMemUsed.recordDataPoint(mb.startTime, ts, val, jvmMemKindAttributeValue)
-}
-
-// RecordApachedruidJvmPoolCommittedDataPoint adds a data point to apachedruid.jvm.pool.committed metric.
-func (mb *MetricsBuilder) RecordApachedruidJvmPoolCommittedDataPoint(ts pcommon.Timestamp, val int64, jvmPoolNameAttributeValue string, jvmPoolKindAttributeValue string) {
-	mb.metricApachedruidJvmPoolCommitted.recordDataPoint(mb.startTime, ts, val, jvmPoolNameAttributeValue, jvmPoolKindAttributeValue)
-}
-
-// RecordApachedruidJvmPoolInitDataPoint adds a data point to apachedruid.jvm.pool.init metric.
-func (mb *MetricsBuilder) RecordApachedruidJvmPoolInitDataPoint(ts pcommon.Timestamp, val int64, jvmPoolNameAttributeValue string, jvmPoolKindAttributeValue string) {
-	mb.metricApachedruidJvmPoolInit.recordDataPoint(mb.startTime, ts, val, jvmPoolNameAttributeValue, jvmPoolKindAttributeValue)
-}
-
-// RecordApachedruidJvmPoolMaxDataPoint adds a data point to apachedruid.jvm.pool.max metric.
-func (mb *MetricsBuilder) RecordApachedruidJvmPoolMaxDataPoint(ts pcommon.Timestamp, val int64, jvmPoolNameAttributeValue string, jvmPoolKindAttributeValue string) {
-	mb.metricApachedruidJvmPoolMax.recordDataPoint(mb.startTime, ts, val, jvmPoolNameAttributeValue, jvmPoolKindAttributeValue)
-}
-
-// RecordApachedruidJvmPoolUsedDataPoint adds a data point to apachedruid.jvm.pool.used metric.
-func (mb *MetricsBuilder) RecordApachedruidJvmPoolUsedDataPoint(ts pcommon.Timestamp, val int64, jvmPoolNameAttributeValue string, jvmPoolKindAttributeValue string) {
-	mb.metricApachedruidJvmPoolUsed.recordDataPoint(mb.startTime, ts, val, jvmPoolNameAttributeValue, jvmPoolKindAttributeValue)
-}
-
-// RecordApachedruidKillPendingSegmentsCountDataPoint adds a data point to apachedruid.kill.pending_segments.count metric.
-func (mb *MetricsBuilder) RecordApachedruidKillPendingSegmentsCountDataPoint(ts pcommon.Timestamp, val int64, killDataSourceAttributeValue string) {
-	mb.metricApachedruidKillPendingSegmentsCount.recordDataPoint(mb.startTime, ts, val, killDataSourceAttributeValue)
-}
-
-// RecordApachedruidKillTaskCountDataPoint adds a data point to apachedruid.kill.task.count metric.
-func (mb *MetricsBuilder) RecordApachedruidKillTaskCountDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidKillTaskCount.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidKillTaskAvailableSlotCountDataPoint adds a data point to apachedruid.kill_task.available_slot.count metric.
-func (mb *MetricsBuilder) RecordApachedruidKillTaskAvailableSlotCountDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidKillTaskAvailableSlotCount.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidKillTaskMaxSlotCountDataPoint adds a data point to apachedruid.kill_task.max_slot.count metric.
-func (mb *MetricsBuilder) RecordApachedruidKillTaskMaxSlotCountDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidKillTaskMaxSlotCount.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidMergeBufferPendingRequestsDataPoint adds a data point to apachedruid.merge_buffer.pending_requests metric.
-func (mb *MetricsBuilder) RecordApachedruidMergeBufferPendingRequestsDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidMergeBufferPendingRequests.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidMetadataKillAuditCountDataPoint adds a data point to apachedruid.metadata.kill.audit.count metric.
-func (mb *MetricsBuilder) RecordApachedruidMetadataKillAuditCountDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidMetadataKillAuditCount.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidMetadataKillCompactionCountDataPoint adds a data point to apachedruid.metadata.kill.compaction.count metric.
-func (mb *MetricsBuilder) RecordApachedruidMetadataKillCompactionCountDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidMetadataKillCompactionCount.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidMetadataKillDatasourceCountDataPoint adds a data point to apachedruid.metadata.kill.datasource.count metric.
-func (mb *MetricsBuilder) RecordApachedruidMetadataKillDatasourceCountDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidMetadataKillDatasourceCount.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidMetadataKillRuleCountDataPoint adds a data point to apachedruid.metadata.kill.rule.count metric.
-func (mb *MetricsBuilder) RecordApachedruidMetadataKillRuleCountDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidMetadataKillRuleCount.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidMetadataKillSupervisorCountDataPoint adds a data point to apachedruid.metadata.kill.supervisor.count metric.
-func (mb *MetricsBuilder) RecordApachedruidMetadataKillSupervisorCountDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidMetadataKillSupervisorCount.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidMetadatacacheInitTimeDataPoint adds a data point to apachedruid.metadatacache.init.time metric.
-func (mb *MetricsBuilder) RecordApachedruidMetadatacacheInitTimeDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidMetadatacacheInitTime.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidMetadatacacheRefreshCountDataPoint adds a data point to apachedruid.metadatacache.refresh.count metric.
-func (mb *MetricsBuilder) RecordApachedruidMetadatacacheRefreshCountDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidMetadatacacheRefreshCount.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidMetadatacacheRefreshTimeDataPoint adds a data point to apachedruid.metadatacache.refresh.time metric.
-func (mb *MetricsBuilder) RecordApachedruidMetadatacacheRefreshTimeDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidMetadatacacheRefreshTime.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryByteLimitExceededCountDataPoint adds a data point to apachedruid.query.byte_limit.exceeded.count metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryByteLimitExceededCountDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidQueryByteLimitExceededCount.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryBytesDataPoint adds a data point to apachedruid.query.bytes metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryBytesDataPoint(ts pcommon.Timestamp, val int64, queryDataSourceAttributeValue string, queryNumMetricsAttributeValue string, queryDimensionAttributeValue string, queryHasFiltersAttributeValue string, queryThresholdAttributeValue int64, queryNumComplexMetricsAttributeValue int64, queryTypeAttributeValue string, queryRemoteAddressAttributeValue string, queryIDAttributeValue string, queryContextAttributeValue string, queryNumDimensionsAttributeValue string, queryIntervalAttributeValue string, queryDurationAttributeValue string) {
-	mb.metricApachedruidQueryBytes.recordDataPoint(mb.startTime, ts, val, queryDataSourceAttributeValue, queryNumMetricsAttributeValue, queryDimensionAttributeValue, queryHasFiltersAttributeValue, queryThresholdAttributeValue, queryNumComplexMetricsAttributeValue, queryTypeAttributeValue, queryRemoteAddressAttributeValue, queryIDAttributeValue, queryContextAttributeValue, queryNumDimensionsAttributeValue, queryIntervalAttributeValue, queryDurationAttributeValue)
-}
-
-// RecordApachedruidQueryCacheDeltaAverageBytesDataPoint adds a data point to apachedruid.query.cache.delta.average_bytes metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryCacheDeltaAverageBytesDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidQueryCacheDeltaAverageBytes.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryCacheDeltaErrorsDataPoint adds a data point to apachedruid.query.cache.delta.errors metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryCacheDeltaErrorsDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidQueryCacheDeltaErrors.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryCacheDeltaEvictionsDataPoint adds a data point to apachedruid.query.cache.delta.evictions metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryCacheDeltaEvictionsDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidQueryCacheDeltaEvictions.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryCacheDeltaHitRateDataPoint adds a data point to apachedruid.query.cache.delta.hit_rate metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryCacheDeltaHitRateDataPoint(ts pcommon.Timestamp, val float64) {
-	mb.metricApachedruidQueryCacheDeltaHitRate.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryCacheDeltaHitsDataPoint adds a data point to apachedruid.query.cache.delta.hits metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryCacheDeltaHitsDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidQueryCacheDeltaHits.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryCacheDeltaMissesDataPoint adds a data point to apachedruid.query.cache.delta.misses metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryCacheDeltaMissesDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidQueryCacheDeltaMisses.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryCacheDeltaNumEntriesDataPoint adds a data point to apachedruid.query.cache.delta.num_entries metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryCacheDeltaNumEntriesDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidQueryCacheDeltaNumEntries.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryCacheDeltaPutErrorDataPoint adds a data point to apachedruid.query.cache.delta.put.error metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryCacheDeltaPutErrorDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidQueryCacheDeltaPutError.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryCacheDeltaPutOkDataPoint adds a data point to apachedruid.query.cache.delta.put.ok metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryCacheDeltaPutOkDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidQueryCacheDeltaPutOk.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryCacheDeltaPutOversizedDataPoint adds a data point to apachedruid.query.cache.delta.put.oversized metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryCacheDeltaPutOversizedDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidQueryCacheDeltaPutOversized.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryCacheDeltaSizeBytesDataPoint adds a data point to apachedruid.query.cache.delta.size_bytes metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryCacheDeltaSizeBytesDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidQueryCacheDeltaSizeBytes.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryCacheDeltaTimeoutsDataPoint adds a data point to apachedruid.query.cache.delta.timeouts metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryCacheDeltaTimeoutsDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidQueryCacheDeltaTimeouts.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryCacheMemcachedDeltaDataPoint adds a data point to apachedruid.query.cache.memcached.delta metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryCacheMemcachedDeltaDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidQueryCacheMemcachedDelta.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryCacheMemcachedTotalDataPoint adds a data point to apachedruid.query.cache.memcached.total metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryCacheMemcachedTotalDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidQueryCacheMemcachedTotal.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryCacheTotalAverageBytesDataPoint adds a data point to apachedruid.query.cache.total.average_bytes metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryCacheTotalAverageBytesDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidQueryCacheTotalAverageBytes.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryCacheTotalErrorsDataPoint adds a data point to apachedruid.query.cache.total.errors metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryCacheTotalErrorsDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidQueryCacheTotalErrors.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryCacheTotalEvictionsDataPoint adds a data point to apachedruid.query.cache.total.evictions metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryCacheTotalEvictionsDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidQueryCacheTotalEvictions.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryCacheTotalHitRateDataPoint adds a data point to apachedruid.query.cache.total.hit_rate metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryCacheTotalHitRateDataPoint(ts pcommon.Timestamp, val float64) {
-	mb.metricApachedruidQueryCacheTotalHitRate.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryCacheTotalHitsDataPoint adds a data point to apachedruid.query.cache.total.hits metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryCacheTotalHitsDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidQueryCacheTotalHits.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryCacheTotalMissesDataPoint adds a data point to apachedruid.query.cache.total.misses metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryCacheTotalMissesDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidQueryCacheTotalMisses.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryCacheTotalNumEntriesDataPoint adds a data point to apachedruid.query.cache.total.num_entries metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryCacheTotalNumEntriesDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidQueryCacheTotalNumEntries.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryCacheTotalPutErrorDataPoint adds a data point to apachedruid.query.cache.total.put.error metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryCacheTotalPutErrorDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidQueryCacheTotalPutError.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryCacheTotalPutOkDataPoint adds a data point to apachedruid.query.cache.total.put.ok metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryCacheTotalPutOkDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidQueryCacheTotalPutOk.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryCacheTotalPutOversizedDataPoint adds a data point to apachedruid.query.cache.total.put.oversized metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryCacheTotalPutOversizedDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidQueryCacheTotalPutOversized.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryCacheTotalSizeBytesDataPoint adds a data point to apachedruid.query.cache.total.size_bytes metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryCacheTotalSizeBytesDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidQueryCacheTotalSizeBytes.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryCacheTotalTimeoutsDataPoint adds a data point to apachedruid.query.cache.total.timeouts metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryCacheTotalTimeoutsDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidQueryCacheTotalTimeouts.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryCountDataPoint adds a data point to apachedruid.query.count metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryCountDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidQueryCount.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryCPUTimeDataPoint adds a data point to apachedruid.query.cpu.time metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryCPUTimeDataPoint(ts pcommon.Timestamp, val int64, queryDataSourceAttributeValue string, queryNumMetricsAttributeValue string, queryDimensionAttributeValue string, queryHasFiltersAttributeValue string, queryThresholdAttributeValue int64, queryNumComplexMetricsAttributeValue int64, queryTypeAttributeValue string, queryRemoteAddressAttributeValue string, queryIDAttributeValue string, queryContextAttributeValue string, queryNumDimensionsAttributeValue string, queryIntervalAttributeValue string, queryDurationAttributeValue string) {
-	mb.metricApachedruidQueryCPUTime.recordDataPoint(mb.startTime, ts, val, queryDataSourceAttributeValue, queryNumMetricsAttributeValue, queryDimensionAttributeValue, queryHasFiltersAttributeValue, queryThresholdAttributeValue, queryNumComplexMetricsAttributeValue, queryTypeAttributeValue, queryRemoteAddressAttributeValue, queryIDAttributeValue, queryContextAttributeValue, queryNumDimensionsAttributeValue, queryIntervalAttributeValue, queryDurationAttributeValue)
-}
-
-// RecordApachedruidQueryFailedCountDataPoint adds a data point to apachedruid.query.failed.count metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryFailedCountDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidQueryFailedCount.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryInterruptedCountDataPoint adds a data point to apachedruid.query.interrupted.count metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryInterruptedCountDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidQueryInterruptedCount.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryNodeBackpressureDataPoint adds a data point to apachedruid.query.node.backpressure metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryNodeBackpressureDataPoint(ts pcommon.Timestamp, val int64, queryStatusAttributeValue string, queryServerAttributeValue string, queryIDAttributeValue string) {
-	mb.metricApachedruidQueryNodeBackpressure.recordDataPoint(mb.startTime, ts, val, queryStatusAttributeValue, queryServerAttributeValue, queryIDAttributeValue)
-}
-
-// RecordApachedruidQueryNodeBytesDataPoint adds a data point to apachedruid.query.node.bytes metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryNodeBytesDataPoint(ts pcommon.Timestamp, val int64, queryStatusAttributeValue string, queryServerAttributeValue string, queryIDAttributeValue string) {
-	mb.metricApachedruidQueryNodeBytes.recordDataPoint(mb.startTime, ts, val, queryStatusAttributeValue, queryServerAttributeValue, queryIDAttributeValue)
-}
-
-// RecordApachedruidQueryNodeTimeDataPoint adds a data point to apachedruid.query.node.time metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryNodeTimeDataPoint(ts pcommon.Timestamp, val int64, queryStatusAttributeValue string, queryServerAttributeValue string, queryIDAttributeValue string) {
-	mb.metricApachedruidQueryNodeTime.recordDataPoint(mb.startTime, ts, val, queryStatusAttributeValue, queryServerAttributeValue, queryIDAttributeValue)
-}
-
-// RecordApachedruidQueryNodeTtfbDataPoint adds a data point to apachedruid.query.node.ttfb metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryNodeTtfbDataPoint(ts pcommon.Timestamp, val int64, queryStatusAttributeValue string, queryServerAttributeValue string, queryIDAttributeValue string) {
-	mb.metricApachedruidQueryNodeTtfb.recordDataPoint(mb.startTime, ts, val, queryStatusAttributeValue, queryServerAttributeValue, queryIDAttributeValue)
-}
-
-// RecordApachedruidQueryPriorityDataPoint adds a data point to apachedruid.query.priority metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryPriorityDataPoint(ts pcommon.Timestamp, val int64, queryTypeAttributeValue string, queryDataSourceAttributeValue string, queryLaneAttributeValue string) {
-	mb.metricApachedruidQueryPriority.recordDataPoint(mb.startTime, ts, val, queryTypeAttributeValue, queryDataSourceAttributeValue, queryLaneAttributeValue)
-}
-
-// RecordApachedruidQueryRowLimitExceededCountDataPoint adds a data point to apachedruid.query.row_limit.exceeded.count metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryRowLimitExceededCountDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidQueryRowLimitExceededCount.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQuerySegmentTimeDataPoint adds a data point to apachedruid.query.segment.time metric.
-func (mb *MetricsBuilder) RecordApachedruidQuerySegmentTimeDataPoint(ts pcommon.Timestamp, val int64, queryStatusAttributeValue string, querySegmentAttributeValue string, queryIDAttributeValue string, queryVectorizedAttributeValue string) {
-	mb.metricApachedruidQuerySegmentTime.recordDataPoint(mb.startTime, ts, val, queryStatusAttributeValue, querySegmentAttributeValue, queryIDAttributeValue, queryVectorizedAttributeValue)
-}
-
-// RecordApachedruidQuerySegmentAndCacheTimeDataPoint adds a data point to apachedruid.query.segment_and_cache.time metric.
-func (mb *MetricsBuilder) RecordApachedruidQuerySegmentAndCacheTimeDataPoint(ts pcommon.Timestamp, val int64, querySegmentAttributeValue string, queryIDAttributeValue string) {
-	mb.metricApachedruidQuerySegmentAndCacheTime.recordDataPoint(mb.startTime, ts, val, querySegmentAttributeValue, queryIDAttributeValue)
-}
-
-// RecordApachedruidQuerySegmentsCountDataPoint adds a data point to apachedruid.query.segments.count metric.
-func (mb *MetricsBuilder) RecordApachedruidQuerySegmentsCountDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidQuerySegmentsCount.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQuerySuccessCountDataPoint adds a data point to apachedruid.query.success.count metric.
-func (mb *MetricsBuilder) RecordApachedruidQuerySuccessCountDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidQuerySuccessCount.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryTimeDataPoint adds a data point to apachedruid.query.time metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryTimeDataPoint(ts pcommon.Timestamp, val int64, queryDataSourceAttributeValue string, queryNumMetricsAttributeValue string, queryDimensionAttributeValue string, queryHasFiltersAttributeValue string, queryThresholdAttributeValue int64, queryNumComplexMetricsAttributeValue int64, queryTypeAttributeValue string, queryRemoteAddressAttributeValue string, queryIDAttributeValue string, queryContextAttributeValue string, queryNumDimensionsAttributeValue string, queryIntervalAttributeValue string, queryDurationAttributeValue string) {
-	mb.metricApachedruidQueryTime.recordDataPoint(mb.startTime, ts, val, queryDataSourceAttributeValue, queryNumMetricsAttributeValue, queryDimensionAttributeValue, queryHasFiltersAttributeValue, queryThresholdAttributeValue, queryNumComplexMetricsAttributeValue, queryTypeAttributeValue, queryRemoteAddressAttributeValue, queryIDAttributeValue, queryContextAttributeValue, queryNumDimensionsAttributeValue, queryIntervalAttributeValue, queryDurationAttributeValue)
-}
-
-// RecordApachedruidQueryTimeoutCountDataPoint adds a data point to apachedruid.query.timeout.count metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryTimeoutCountDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidQueryTimeoutCount.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidQueryWaitTimeDataPoint adds a data point to apachedruid.query.wait.time metric.
-func (mb *MetricsBuilder) RecordApachedruidQueryWaitTimeDataPoint(ts pcommon.Timestamp, val int64, querySegmentAttributeValue string, queryIDAttributeValue string) {
-	mb.metricApachedruidQueryWaitTime.recordDataPoint(mb.startTime, ts, val, querySegmentAttributeValue, queryIDAttributeValue)
-}
-
-// RecordApachedruidSegmentAddedBytesDataPoint adds a data point to apachedruid.segment.added.bytes metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentAddedBytesDataPoint(ts pcommon.Timestamp, val int64, segmentTaskTypeAttributeValue string, segmentDataSourceAttributeValue string, segmentGroupIDAttributeValue string, segmentTagsAttributeValue string, segmentTaskIDAttributeValue string, segmentIntervalAttributeValue string) {
-	mb.metricApachedruidSegmentAddedBytes.recordDataPoint(mb.startTime, ts, val, segmentTaskTypeAttributeValue, segmentDataSourceAttributeValue, segmentGroupIDAttributeValue, segmentTagsAttributeValue, segmentTaskIDAttributeValue, segmentIntervalAttributeValue)
-}
-
-// RecordApachedruidSegmentAssignSkippedCountDataPoint adds a data point to apachedruid.segment.assign_skipped.count metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentAssignSkippedCountDataPoint(ts pcommon.Timestamp, val int64, segmentDescriptionAttributeValue string, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) {
-	mb.metricApachedruidSegmentAssignSkippedCount.recordDataPoint(mb.startTime, ts, val, segmentDescriptionAttributeValue, segmentTierAttributeValue, segmentDataSourceAttributeValue)
-}
-
-// RecordApachedruidSegmentAssignedCountDataPoint adds a data point to apachedruid.segment.assigned.count metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentAssignedCountDataPoint(ts pcommon.Timestamp, val int64, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) {
-	mb.metricApachedruidSegmentAssignedCount.recordDataPoint(mb.startTime, ts, val, segmentTierAttributeValue, segmentDataSourceAttributeValue)
-}
-
-// RecordApachedruidSegmentCompactedBytesDataPoint adds a data point to apachedruid.segment.compacted.bytes metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentCompactedBytesDataPoint(ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) {
-	mb.metricApachedruidSegmentCompactedBytes.recordDataPoint(mb.startTime, ts, val, segmentDataSourceAttributeValue)
-}
-
-// RecordApachedruidSegmentCompactedCountDataPoint adds a data point to apachedruid.segment.compacted.count metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentCompactedCountDataPoint(ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) {
-	mb.metricApachedruidSegmentCompactedCount.recordDataPoint(mb.startTime, ts, val, segmentDataSourceAttributeValue)
-}
-
-// RecordApachedruidSegmentCountDataPoint adds a data point to apachedruid.segment.count metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentCountDataPoint(ts pcommon.Timestamp, val int64, segmentPriorityAttributeValue string, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) {
-	mb.metricApachedruidSegmentCount.recordDataPoint(mb.startTime, ts, val, segmentPriorityAttributeValue, segmentTierAttributeValue, segmentDataSourceAttributeValue)
-}
-
-// RecordApachedruidSegmentDeletedCountDataPoint adds a data point to apachedruid.segment.deleted.count metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentDeletedCountDataPoint(ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) {
-	mb.metricApachedruidSegmentDeletedCount.recordDataPoint(mb.startTime, ts, val, segmentDataSourceAttributeValue)
-}
-
-// RecordApachedruidSegmentDropQueueCountDataPoint adds a data point to apachedruid.segment.drop_queue.count metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentDropQueueCountDataPoint(ts pcommon.Timestamp, val int64, segmentServerAttributeValue string) {
-	mb.metricApachedruidSegmentDropQueueCount.recordDataPoint(mb.startTime, ts, val, segmentServerAttributeValue)
-}
-
-// RecordApachedruidSegmentDropSkippedCountDataPoint adds a data point to apachedruid.segment.drop_skipped.count metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentDropSkippedCountDataPoint(ts pcommon.Timestamp, val int64, segmentDescriptionAttributeValue string, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) {
-	mb.metricApachedruidSegmentDropSkippedCount.recordDataPoint(mb.startTime, ts, val, segmentDescriptionAttributeValue, segmentTierAttributeValue, segmentDataSourceAttributeValue)
-}
-
-// RecordApachedruidSegmentDroppedCountDataPoint adds a data point to apachedruid.segment.dropped.count metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentDroppedCountDataPoint(ts pcommon.Timestamp, val int64, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) {
-	mb.metricApachedruidSegmentDroppedCount.recordDataPoint(mb.startTime, ts, val, segmentTierAttributeValue, segmentDataSourceAttributeValue)
-}
-
-// RecordApachedruidSegmentLoadQueueAssignedDataPoint adds a data point to apachedruid.segment.load_queue.assigned metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentLoadQueueAssignedDataPoint(ts pcommon.Timestamp, val int64, segmentServerAttributeValue string, segmentDataSourceAttributeValue string) {
-	mb.metricApachedruidSegmentLoadQueueAssigned.recordDataPoint(mb.startTime, ts, val, segmentServerAttributeValue, segmentDataSourceAttributeValue)
-}
-
-// RecordApachedruidSegmentLoadQueueCancelledDataPoint adds a data point to apachedruid.segment.load_queue.cancelled metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentLoadQueueCancelledDataPoint(ts pcommon.Timestamp, val int64, segmentServerAttributeValue string, segmentDataSourceAttributeValue string) {
-	mb.metricApachedruidSegmentLoadQueueCancelled.recordDataPoint(mb.startTime, ts, val, segmentServerAttributeValue, segmentDataSourceAttributeValue)
-}
-
-// RecordApachedruidSegmentLoadQueueCountDataPoint adds a data point to apachedruid.segment.load_queue.count metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentLoadQueueCountDataPoint(ts pcommon.Timestamp, val int64, segmentServerAttributeValue string) {
-	mb.metricApachedruidSegmentLoadQueueCount.recordDataPoint(mb.startTime, ts, val, segmentServerAttributeValue)
-}
-
-// RecordApachedruidSegmentLoadQueueFailedDataPoint adds a data point to apachedruid.segment.load_queue.failed metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentLoadQueueFailedDataPoint(ts pcommon.Timestamp, val int64, segmentServerAttributeValue string, segmentDataSourceAttributeValue string) {
-	mb.metricApachedruidSegmentLoadQueueFailed.recordDataPoint(mb.startTime, ts, val, segmentServerAttributeValue, segmentDataSourceAttributeValue)
-}
-
-// RecordApachedruidSegmentLoadQueueSizeDataPoint adds a data point to apachedruid.segment.load_queue.size metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentLoadQueueSizeDataPoint(ts pcommon.Timestamp, val int64, segmentServerAttributeValue string) {
-	mb.metricApachedruidSegmentLoadQueueSize.recordDataPoint(mb.startTime, ts, val, segmentServerAttributeValue)
-}
-
-// RecordApachedruidSegmentLoadQueueSuccessDataPoint adds a data point to apachedruid.segment.load_queue.success metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentLoadQueueSuccessDataPoint(ts pcommon.Timestamp, val int64, segmentServerAttributeValue string, segmentDataSourceAttributeValue string) {
-	mb.metricApachedruidSegmentLoadQueueSuccess.recordDataPoint(mb.startTime, ts, val, segmentServerAttributeValue, segmentDataSourceAttributeValue)
-}
-
-// RecordApachedruidSegmentMaxDataPoint adds a data point to apachedruid.segment.max metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentMaxDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidSegmentMax.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidSegmentMoveSkippedCountDataPoint adds a data point to apachedruid.segment.move_skipped.count metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentMoveSkippedCountDataPoint(ts pcommon.Timestamp, val int64, segmentDescriptionAttributeValue string, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) {
-	mb.metricApachedruidSegmentMoveSkippedCount.recordDataPoint(mb.startTime, ts, val, segmentDescriptionAttributeValue, segmentTierAttributeValue, segmentDataSourceAttributeValue)
-}
-
-// RecordApachedruidSegmentMovedBytesDataPoint adds a data point to apachedruid.segment.moved.bytes metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentMovedBytesDataPoint(ts pcommon.Timestamp, val int64, segmentTaskTypeAttributeValue string, segmentDataSourceAttributeValue string, segmentGroupIDAttributeValue string, segmentTagsAttributeValue string, segmentTaskIDAttributeValue string, segmentIntervalAttributeValue string) {
-	mb.metricApachedruidSegmentMovedBytes.recordDataPoint(mb.startTime, ts, val, segmentTaskTypeAttributeValue, segmentDataSourceAttributeValue, segmentGroupIDAttributeValue, segmentTagsAttributeValue, segmentTaskIDAttributeValue, segmentIntervalAttributeValue)
-}
-
-// RecordApachedruidSegmentMovedCountDataPoint adds a data point to apachedruid.segment.moved.count metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentMovedCountDataPoint(ts pcommon.Timestamp, val int64, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) {
-	mb.metricApachedruidSegmentMovedCount.recordDataPoint(mb.startTime, ts, val, segmentTierAttributeValue, segmentDataSourceAttributeValue)
-}
-
-// RecordApachedruidSegmentNukedBytesDataPoint adds a data point to apachedruid.segment.nuked.bytes metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentNukedBytesDataPoint(ts pcommon.Timestamp, val int64, segmentTaskTypeAttributeValue string, segmentDataSourceAttributeValue string, segmentGroupIDAttributeValue string, segmentTagsAttributeValue string, segmentTaskIDAttributeValue string, segmentIntervalAttributeValue string) {
-	mb.metricApachedruidSegmentNukedBytes.recordDataPoint(mb.startTime, ts, val, segmentTaskTypeAttributeValue, segmentDataSourceAttributeValue, segmentGroupIDAttributeValue, segmentTagsAttributeValue, segmentTaskIDAttributeValue, segmentIntervalAttributeValue)
-}
-
-// RecordApachedruidSegmentOverShadowedCountDataPoint adds a data point to apachedruid.segment.over_shadowed.count metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentOverShadowedCountDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidSegmentOverShadowedCount.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidSegmentPendingDeleteDataPoint adds a data point to apachedruid.segment.pending_delete metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentPendingDeleteDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidSegmentPendingDelete.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidSegmentRowCountAvgDataPoint adds a data point to apachedruid.segment.row_count.avg metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentRowCountAvgDataPoint(ts pcommon.Timestamp, val int64, segmentPriorityAttributeValue string, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) {
-	mb.metricApachedruidSegmentRowCountAvg.recordDataPoint(mb.startTime, ts, val, segmentPriorityAttributeValue, segmentTierAttributeValue, segmentDataSourceAttributeValue)
-}
-
-// RecordApachedruidSegmentRowCountRangeCountDataPoint adds a data point to apachedruid.segment.row_count.range.count metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentRowCountRangeCountDataPoint(ts pcommon.Timestamp, val int64, segmentPriorityAttributeValue string, segmentTierAttributeValue string, segmentDataSourceAttributeValue string, segmentRangeAttributeValue string) {
-	mb.metricApachedruidSegmentRowCountRangeCount.recordDataPoint(mb.startTime, ts, val, segmentPriorityAttributeValue, segmentTierAttributeValue, segmentDataSourceAttributeValue, segmentRangeAttributeValue)
-}
-
-// RecordApachedruidSegmentScanActiveDataPoint adds a data point to apachedruid.segment.scan.active metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentScanActiveDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidSegmentScanActive.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidSegmentScanPendingDataPoint adds a data point to apachedruid.segment.scan.pending metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentScanPendingDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidSegmentScanPending.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidSegmentSizeDataPoint adds a data point to apachedruid.segment.size metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentSizeDataPoint(ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) {
-	mb.metricApachedruidSegmentSize.recordDataPoint(mb.startTime, ts, val, segmentDataSourceAttributeValue)
-}
-
-// RecordApachedruidSegmentSkipCompactBytesDataPoint adds a data point to apachedruid.segment.skip_compact.bytes metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentSkipCompactBytesDataPoint(ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) {
-	mb.metricApachedruidSegmentSkipCompactBytes.recordDataPoint(mb.startTime, ts, val, segmentDataSourceAttributeValue)
-}
-
-// RecordApachedruidSegmentSkipCompactCountDataPoint adds a data point to apachedruid.segment.skip_compact.count metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentSkipCompactCountDataPoint(ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) {
-	mb.metricApachedruidSegmentSkipCompactCount.recordDataPoint(mb.startTime, ts, val, segmentDataSourceAttributeValue)
-}
-
-// RecordApachedruidSegmentUnavailableCountDataPoint adds a data point to apachedruid.segment.unavailable.count metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentUnavailableCountDataPoint(ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) {
-	mb.metricApachedruidSegmentUnavailableCount.recordDataPoint(mb.startTime, ts, val, segmentDataSourceAttributeValue)
-}
-
-// RecordApachedruidSegmentUnderReplicatedCountDataPoint adds a data point to apachedruid.segment.under_replicated.count metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentUnderReplicatedCountDataPoint(ts pcommon.Timestamp, val int64, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) {
-	mb.metricApachedruidSegmentUnderReplicatedCount.recordDataPoint(mb.startTime, ts, val, segmentTierAttributeValue, segmentDataSourceAttributeValue)
-}
-
-// RecordApachedruidSegmentUnneededCountDataPoint adds a data point to apachedruid.segment.unneeded.count metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentUnneededCountDataPoint(ts pcommon.Timestamp, val int64, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) {
-	mb.metricApachedruidSegmentUnneededCount.recordDataPoint(mb.startTime, ts, val, segmentTierAttributeValue, segmentDataSourceAttributeValue)
-}
-
-// RecordApachedruidSegmentUsedDataPoint adds a data point to apachedruid.segment.used metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentUsedDataPoint(ts pcommon.Timestamp, val int64, segmentPriorityAttributeValue string, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) {
-	mb.metricApachedruidSegmentUsed.recordDataPoint(mb.startTime, ts, val, segmentPriorityAttributeValue, segmentTierAttributeValue, segmentDataSourceAttributeValue)
-}
-
-// RecordApachedruidSegmentUsedPercentDataPoint adds a data point to apachedruid.segment.used_percent metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentUsedPercentDataPoint(ts pcommon.Timestamp, val float64, segmentPriorityAttributeValue string, segmentTierAttributeValue string, segmentDataSourceAttributeValue string) {
-	mb.metricApachedruidSegmentUsedPercent.recordDataPoint(mb.startTime, ts, val, segmentPriorityAttributeValue, segmentTierAttributeValue, segmentDataSourceAttributeValue)
-}
-
-// RecordApachedruidSegmentWaitCompactBytesDataPoint adds a data point to apachedruid.segment.wait_compact.bytes metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentWaitCompactBytesDataPoint(ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) {
-	mb.metricApachedruidSegmentWaitCompactBytes.recordDataPoint(mb.startTime, ts, val, segmentDataSourceAttributeValue)
-}
-
-// RecordApachedruidSegmentWaitCompactCountDataPoint adds a data point to apachedruid.segment.wait_compact.count metric.
-func (mb *MetricsBuilder) RecordApachedruidSegmentWaitCompactCountDataPoint(ts pcommon.Timestamp, val int64, segmentDataSourceAttributeValue string) {
-	mb.metricApachedruidSegmentWaitCompactCount.recordDataPoint(mb.startTime, ts, val, segmentDataSourceAttributeValue)
-}
-
-// RecordApachedruidServerviewInitTimeDataPoint adds a data point to apachedruid.serverview.init.time metric.
-func (mb *MetricsBuilder) RecordApachedruidServerviewInitTimeDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidServerviewInitTime.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidServerviewSyncHealthyDataPoint adds a data point to apachedruid.serverview.sync.healthy metric.
-func (mb *MetricsBuilder) RecordApachedruidServerviewSyncHealthyDataPoint(ts pcommon.Timestamp, val int64, serverviewTierAttributeValue string, serverviewServerAttributeValue string) {
-	mb.metricApachedruidServerviewSyncHealthy.recordDataPoint(mb.startTime, ts, val, serverviewTierAttributeValue, serverviewServerAttributeValue)
-}
-
-// RecordApachedruidServerviewSyncUnstableTimeDataPoint adds a data point to apachedruid.serverview.sync.unstable_time metric.
-func (mb *MetricsBuilder) RecordApachedruidServerviewSyncUnstableTimeDataPoint(ts pcommon.Timestamp, val int64, serverviewTierAttributeValue string, serverviewServerAttributeValue string) {
-	mb.metricApachedruidServerviewSyncUnstableTime.recordDataPoint(mb.startTime, ts, val, serverviewTierAttributeValue, serverviewServerAttributeValue)
-}
-
-// RecordApachedruidSQLQueryBytesDataPoint adds a data point to apachedruid.sql_query.bytes metric.
-func (mb *MetricsBuilder) RecordApachedruidSQLQueryBytesDataPoint(ts pcommon.Timestamp, val int64, sqlQueryDataSourceAttributeValue string, sqlQueryNativeQueryIdsAttributeValue string, sqlQueryEngineAttributeValue string, sqlQueryRemoteAddressAttributeValue string, sqlQueryIDAttributeValue string, sqlQuerySuccessAttributeValue string) {
-	mb.metricApachedruidSQLQueryBytes.recordDataPoint(mb.startTime, ts, val, sqlQueryDataSourceAttributeValue, sqlQueryNativeQueryIdsAttributeValue, sqlQueryEngineAttributeValue, sqlQueryRemoteAddressAttributeValue, sqlQueryIDAttributeValue, sqlQuerySuccessAttributeValue)
-}
-
-// RecordApachedruidSQLQueryPlanningTimeMsDataPoint adds a data point to apachedruid.sql_query.planning_time_ms metric.
-func (mb *MetricsBuilder) RecordApachedruidSQLQueryPlanningTimeMsDataPoint(ts pcommon.Timestamp, val int64, sqlQueryDataSourceAttributeValue string, sqlQueryNativeQueryIdsAttributeValue string, sqlQueryEngineAttributeValue string, sqlQueryRemoteAddressAttributeValue string, sqlQueryIDAttributeValue string, sqlQuerySuccessAttributeValue string) {
-	mb.metricApachedruidSQLQueryPlanningTimeMs.recordDataPoint(mb.startTime, ts, val, sqlQueryDataSourceAttributeValue, sqlQueryNativeQueryIdsAttributeValue, sqlQueryEngineAttributeValue, sqlQueryRemoteAddressAttributeValue, sqlQueryIDAttributeValue, sqlQuerySuccessAttributeValue)
-}
-
-// RecordApachedruidSQLQueryTimeDataPoint adds a data point to apachedruid.sql_query.time metric.
-func (mb *MetricsBuilder) RecordApachedruidSQLQueryTimeDataPoint(ts pcommon.Timestamp, val int64, sqlQueryDataSourceAttributeValue string, sqlQueryNativeQueryIdsAttributeValue string, sqlQueryEngineAttributeValue string, sqlQueryRemoteAddressAttributeValue string, sqlQueryIDAttributeValue string, sqlQuerySuccessAttributeValue string) {
-	mb.metricApachedruidSQLQueryTime.recordDataPoint(mb.startTime, ts, val, sqlQueryDataSourceAttributeValue, sqlQueryNativeQueryIdsAttributeValue, sqlQueryEngineAttributeValue, sqlQueryRemoteAddressAttributeValue, sqlQueryIDAttributeValue, sqlQuerySuccessAttributeValue)
-}
-
-// RecordApachedruidSubqueryByteLimitCountDataPoint adds a data point to apachedruid.subquery.byte_limit.count metric.
-func (mb *MetricsBuilder) RecordApachedruidSubqueryByteLimitCountDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidSubqueryByteLimitCount.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidSubqueryFallbackCountDataPoint adds a data point to apachedruid.subquery.fallback.count metric.
-func (mb *MetricsBuilder) RecordApachedruidSubqueryFallbackCountDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidSubqueryFallbackCount.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidSubqueryFallbackInsufficientTypeCountDataPoint adds a data point to apachedruid.subquery.fallback.insufficient_type.count metric.
-func (mb *MetricsBuilder) RecordApachedruidSubqueryFallbackInsufficientTypeCountDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidSubqueryFallbackInsufficientTypeCount.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidSubqueryFallbackUnknownReasonCountDataPoint adds a data point to apachedruid.subquery.fallback.unknown_reason.count metric.
-func (mb *MetricsBuilder) RecordApachedruidSubqueryFallbackUnknownReasonCountDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidSubqueryFallbackUnknownReasonCount.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidSubqueryRowLimitCountDataPoint adds a data point to apachedruid.subquery.row_limit.count metric.
-func (mb *MetricsBuilder) RecordApachedruidSubqueryRowLimitCountDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidSubqueryRowLimitCount.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidSysCPUDataPoint adds a data point to apachedruid.sys.cpu metric.
-func (mb *MetricsBuilder) RecordApachedruidSysCPUDataPoint(ts pcommon.Timestamp, val int64, sysCPUTimeAttributeValue string, sysCPUNameAttributeValue string) {
-	mb.metricApachedruidSysCPU.recordDataPoint(mb.startTime, ts, val, sysCPUTimeAttributeValue, sysCPUNameAttributeValue)
-}
-
-// RecordApachedruidSysDiskQueueDataPoint adds a data point to apachedruid.sys.disk.queue metric.
-func (mb *MetricsBuilder) RecordApachedruidSysDiskQueueDataPoint(ts pcommon.Timestamp, val int64, sysDiskNameAttributeValue string) {
-	mb.metricApachedruidSysDiskQueue.recordDataPoint(mb.startTime, ts, val, sysDiskNameAttributeValue)
-}
-
-// RecordApachedruidSysDiskReadCountDataPoint adds a data point to apachedruid.sys.disk.read.count metric.
-func (mb *MetricsBuilder) RecordApachedruidSysDiskReadCountDataPoint(ts pcommon.Timestamp, val int64, sysDiskNameAttributeValue string) {
-	mb.metricApachedruidSysDiskReadCount.recordDataPoint(mb.startTime, ts, val, sysDiskNameAttributeValue)
-}
-
-// RecordApachedruidSysDiskReadSizeDataPoint adds a data point to apachedruid.sys.disk.read.size metric.
-func (mb *MetricsBuilder) RecordApachedruidSysDiskReadSizeDataPoint(ts pcommon.Timestamp, val int64, sysDiskNameAttributeValue string) {
-	mb.metricApachedruidSysDiskReadSize.recordDataPoint(mb.startTime, ts, val, sysDiskNameAttributeValue)
-}
-
-// RecordApachedruidSysDiskTransferTimeDataPoint adds a data point to apachedruid.sys.disk.transfer_time metric.
-func (mb *MetricsBuilder) RecordApachedruidSysDiskTransferTimeDataPoint(ts pcommon.Timestamp, val int64, sysDiskNameAttributeValue string) {
-	mb.metricApachedruidSysDiskTransferTime.recordDataPoint(mb.startTime, ts, val, sysDiskNameAttributeValue)
-}
-
-// RecordApachedruidSysDiskWriteCountDataPoint adds a data point to apachedruid.sys.disk.write.count metric.
-func (mb *MetricsBuilder) RecordApachedruidSysDiskWriteCountDataPoint(ts pcommon.Timestamp, val int64, sysDiskNameAttributeValue string) {
-	mb.metricApachedruidSysDiskWriteCount.recordDataPoint(mb.startTime, ts, val, sysDiskNameAttributeValue)
-}
-
-// RecordApachedruidSysDiskWriteSizeDataPoint adds a data point to apachedruid.sys.disk.write.size metric.
-func (mb *MetricsBuilder) RecordApachedruidSysDiskWriteSizeDataPoint(ts pcommon.Timestamp, val int64, sysDiskNameAttributeValue string) {
-	mb.metricApachedruidSysDiskWriteSize.recordDataPoint(mb.startTime, ts, val, sysDiskNameAttributeValue)
-}
-
-// RecordApachedruidSysFsFilesCountDataPoint adds a data point to apachedruid.sys.fs.files.count metric.
-func (mb *MetricsBuilder) RecordApachedruidSysFsFilesCountDataPoint(ts pcommon.Timestamp, val int64, sysFsDirNameAttributeValue string, sysFsDevNameAttributeValue string) {
-	mb.metricApachedruidSysFsFilesCount.recordDataPoint(mb.startTime, ts, val, sysFsDirNameAttributeValue, sysFsDevNameAttributeValue)
-}
-
-// RecordApachedruidSysFsFilesFreeDataPoint adds a data point to apachedruid.sys.fs.files.free metric.
-func (mb *MetricsBuilder) RecordApachedruidSysFsFilesFreeDataPoint(ts pcommon.Timestamp, val int64, sysFsDirNameAttributeValue string, sysFsDevNameAttributeValue string) {
-	mb.metricApachedruidSysFsFilesFree.recordDataPoint(mb.startTime, ts, val, sysFsDirNameAttributeValue, sysFsDevNameAttributeValue)
-}
-
-// RecordApachedruidSysFsMaxDataPoint adds a data point to apachedruid.sys.fs.max metric.
-func (mb *MetricsBuilder) RecordApachedruidSysFsMaxDataPoint(ts pcommon.Timestamp, val int64, sysFsDirNameAttributeValue string, sysFsDevNameAttributeValue string) {
-	mb.metricApachedruidSysFsMax.recordDataPoint(mb.startTime, ts, val, sysFsDirNameAttributeValue, sysFsDevNameAttributeValue)
-}
-
-// RecordApachedruidSysFsUsedDataPoint adds a data point to apachedruid.sys.fs.used metric.
-func (mb *MetricsBuilder) RecordApachedruidSysFsUsedDataPoint(ts pcommon.Timestamp, val int64, sysFsDirNameAttributeValue string, sysFsDevNameAttributeValue string) {
-	mb.metricApachedruidSysFsUsed.recordDataPoint(mb.startTime, ts, val, sysFsDirNameAttributeValue, sysFsDevNameAttributeValue)
-}
-
-// RecordApachedruidSysLa1DataPoint adds a data point to apachedruid.sys.la.1 metric.
-func (mb *MetricsBuilder) RecordApachedruidSysLa1DataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidSysLa1.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidSysLa15DataPoint adds a data point to apachedruid.sys.la.15 metric.
-func (mb *MetricsBuilder) RecordApachedruidSysLa15DataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidSysLa15.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidSysLa5DataPoint adds a data point to apachedruid.sys.la.5 metric.
-func (mb *MetricsBuilder) RecordApachedruidSysLa5DataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidSysLa5.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidSysMemFreeDataPoint adds a data point to apachedruid.sys.mem.free metric.
-func (mb *MetricsBuilder) RecordApachedruidSysMemFreeDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidSysMemFree.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidSysMemMaxDataPoint adds a data point to apachedruid.sys.mem.max metric.
-func (mb *MetricsBuilder) RecordApachedruidSysMemMaxDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidSysMemMax.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidSysMemUsedDataPoint adds a data point to apachedruid.sys.mem.used metric.
-func (mb *MetricsBuilder) RecordApachedruidSysMemUsedDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidSysMemUsed.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidSysNetReadDroppedDataPoint adds a data point to apachedruid.sys.net.read.dropped metric.
-func (mb *MetricsBuilder) RecordApachedruidSysNetReadDroppedDataPoint(ts pcommon.Timestamp, val int64, sysNetHwaddrAttributeValue string, sysNetNameAttributeValue string, sysNetAddressAttributeValue string) {
-	mb.metricApachedruidSysNetReadDropped.recordDataPoint(mb.startTime, ts, val, sysNetHwaddrAttributeValue, sysNetNameAttributeValue, sysNetAddressAttributeValue)
-}
-
-// RecordApachedruidSysNetReadErrorsDataPoint adds a data point to apachedruid.sys.net.read.errors metric.
-func (mb *MetricsBuilder) RecordApachedruidSysNetReadErrorsDataPoint(ts pcommon.Timestamp, val int64, sysNetHwaddrAttributeValue string, sysNetNameAttributeValue string, sysNetAddressAttributeValue string) {
-	mb.metricApachedruidSysNetReadErrors.recordDataPoint(mb.startTime, ts, val, sysNetHwaddrAttributeValue, sysNetNameAttributeValue, sysNetAddressAttributeValue)
-}
-
-// RecordApachedruidSysNetReadPacketsDataPoint adds a data point to apachedruid.sys.net.read.packets metric.
-func (mb *MetricsBuilder) RecordApachedruidSysNetReadPacketsDataPoint(ts pcommon.Timestamp, val int64, sysNetHwaddrAttributeValue string, sysNetNameAttributeValue string, sysNetAddressAttributeValue string) {
-	mb.metricApachedruidSysNetReadPackets.recordDataPoint(mb.startTime, ts, val, sysNetHwaddrAttributeValue, sysNetNameAttributeValue, sysNetAddressAttributeValue)
-}
-
-// RecordApachedruidSysNetReadSizeDataPoint adds a data point to apachedruid.sys.net.read.size metric.
-func (mb *MetricsBuilder) RecordApachedruidSysNetReadSizeDataPoint(ts pcommon.Timestamp, val int64, sysNetHwaddrAttributeValue string, sysNetNameAttributeValue string, sysNetAddressAttributeValue string) {
-	mb.metricApachedruidSysNetReadSize.recordDataPoint(mb.startTime, ts, val, sysNetHwaddrAttributeValue, sysNetNameAttributeValue, sysNetAddressAttributeValue)
-}
-
-// RecordApachedruidSysNetWriteCollisionsDataPoint adds a data point to apachedruid.sys.net.write.collisions metric.
-func (mb *MetricsBuilder) RecordApachedruidSysNetWriteCollisionsDataPoint(ts pcommon.Timestamp, val int64, sysNetHwaddrAttributeValue string, sysNetNameAttributeValue string, sysNetAddressAttributeValue string) {
-	mb.metricApachedruidSysNetWriteCollisions.recordDataPoint(mb.startTime, ts, val, sysNetHwaddrAttributeValue, sysNetNameAttributeValue, sysNetAddressAttributeValue)
-}
-
-// RecordApachedruidSysNetWriteErrorsDataPoint adds a data point to apachedruid.sys.net.write.errors metric.
-func (mb *MetricsBuilder) RecordApachedruidSysNetWriteErrorsDataPoint(ts pcommon.Timestamp, val int64, sysNetHwaddrAttributeValue string, sysNetNameAttributeValue string, sysNetAddressAttributeValue string) {
-	mb.metricApachedruidSysNetWriteErrors.recordDataPoint(mb.startTime, ts, val, sysNetHwaddrAttributeValue, sysNetNameAttributeValue, sysNetAddressAttributeValue)
-}
-
-// RecordApachedruidSysNetWritePacketsDataPoint adds a data point to apachedruid.sys.net.write.packets metric.
-func (mb *MetricsBuilder) RecordApachedruidSysNetWritePacketsDataPoint(ts pcommon.Timestamp, val int64, sysNetHwaddrAttributeValue string, sysNetNameAttributeValue string, sysNetAddressAttributeValue string) {
-	mb.metricApachedruidSysNetWritePackets.recordDataPoint(mb.startTime, ts, val, sysNetHwaddrAttributeValue, sysNetNameAttributeValue, sysNetAddressAttributeValue)
-}
-
-// RecordApachedruidSysNetWriteSizeDataPoint adds a data point to apachedruid.sys.net.write.size metric.
-func (mb *MetricsBuilder) RecordApachedruidSysNetWriteSizeDataPoint(ts pcommon.Timestamp, val int64, sysNetHwaddrAttributeValue string, sysNetNameAttributeValue string, sysNetAddressAttributeValue string) { - mb.metricApachedruidSysNetWriteSize.recordDataPoint(mb.startTime, ts, val, sysNetHwaddrAttributeValue, sysNetNameAttributeValue, sysNetAddressAttributeValue) -} - -// RecordApachedruidSysStorageUsedDataPoint adds a data point to apachedruid.sys.storage.used metric. -func (mb *MetricsBuilder) RecordApachedruidSysStorageUsedDataPoint(ts pcommon.Timestamp, val int64, sysFsDirNameAttributeValue string) { - mb.metricApachedruidSysStorageUsed.recordDataPoint(mb.startTime, ts, val, sysFsDirNameAttributeValue) -} - -// RecordApachedruidSysSwapFreeDataPoint adds a data point to apachedruid.sys.swap.free metric. -func (mb *MetricsBuilder) RecordApachedruidSysSwapFreeDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricApachedruidSysSwapFree.recordDataPoint(mb.startTime, ts, val) -} - -// RecordApachedruidSysSwapMaxDataPoint adds a data point to apachedruid.sys.swap.max metric. -func (mb *MetricsBuilder) RecordApachedruidSysSwapMaxDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricApachedruidSysSwapMax.recordDataPoint(mb.startTime, ts, val) -} - -// RecordApachedruidSysSwapPageInDataPoint adds a data point to apachedruid.sys.swap.page_in metric. -func (mb *MetricsBuilder) RecordApachedruidSysSwapPageInDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricApachedruidSysSwapPageIn.recordDataPoint(mb.startTime, ts, val) -} - -// RecordApachedruidSysSwapPageOutDataPoint adds a data point to apachedruid.sys.swap.page_out metric. -func (mb *MetricsBuilder) RecordApachedruidSysSwapPageOutDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricApachedruidSysSwapPageOut.recordDataPoint(mb.startTime, ts, val) -} - -// RecordApachedruidSysTcpv4ActiveOpensDataPoint adds a data point to apachedruid.sys.tcpv4.active_opens metric. -func (mb *MetricsBuilder) RecordApachedruidSysTcpv4ActiveOpensDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricApachedruidSysTcpv4ActiveOpens.recordDataPoint(mb.startTime, ts, val) -} - -// RecordApachedruidSysTcpv4AttemptFailsDataPoint adds a data point to apachedruid.sys.tcpv4.attempt_fails metric. -func (mb *MetricsBuilder) RecordApachedruidSysTcpv4AttemptFailsDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricApachedruidSysTcpv4AttemptFails.recordDataPoint(mb.startTime, ts, val) -} - -// RecordApachedruidSysTcpv4EstabResetsDataPoint adds a data point to apachedruid.sys.tcpv4.estab_resets metric. -func (mb *MetricsBuilder) RecordApachedruidSysTcpv4EstabResetsDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricApachedruidSysTcpv4EstabResets.recordDataPoint(mb.startTime, ts, val) -} - -// RecordApachedruidSysTcpv4InErrsDataPoint adds a data point to apachedruid.sys.tcpv4.in.errs metric. -func (mb *MetricsBuilder) RecordApachedruidSysTcpv4InErrsDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricApachedruidSysTcpv4InErrs.recordDataPoint(mb.startTime, ts, val) -} - -// RecordApachedruidSysTcpv4InSegsDataPoint adds a data point to apachedruid.sys.tcpv4.in.segs metric. -func (mb *MetricsBuilder) RecordApachedruidSysTcpv4InSegsDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricApachedruidSysTcpv4InSegs.recordDataPoint(mb.startTime, ts, val) -} - -// RecordApachedruidSysTcpv4OutRstsDataPoint adds a data point to apachedruid.sys.tcpv4.out.rsts metric. 
-func (mb *MetricsBuilder) RecordApachedruidSysTcpv4OutRstsDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricApachedruidSysTcpv4OutRsts.recordDataPoint(mb.startTime, ts, val) -} - -// RecordApachedruidSysTcpv4OutSegsDataPoint adds a data point to apachedruid.sys.tcpv4.out.segs metric. -func (mb *MetricsBuilder) RecordApachedruidSysTcpv4OutSegsDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricApachedruidSysTcpv4OutSegs.recordDataPoint(mb.startTime, ts, val) -} - -// RecordApachedruidSysTcpv4PassiveOpensDataPoint adds a data point to apachedruid.sys.tcpv4.passive_opens metric. -func (mb *MetricsBuilder) RecordApachedruidSysTcpv4PassiveOpensDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricApachedruidSysTcpv4PassiveOpens.recordDataPoint(mb.startTime, ts, val) -} - -// RecordApachedruidSysTcpv4RetransSegsDataPoint adds a data point to apachedruid.sys.tcpv4.retrans.segs metric. -func (mb *MetricsBuilder) RecordApachedruidSysTcpv4RetransSegsDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricApachedruidSysTcpv4RetransSegs.recordDataPoint(mb.startTime, ts, val) -} - -// RecordApachedruidSysUptimeDataPoint adds a data point to apachedruid.sys.uptime metric. -func (mb *MetricsBuilder) RecordApachedruidSysUptimeDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricApachedruidSysUptime.recordDataPoint(mb.startTime, ts, val) -} - -// RecordApachedruidTaskActionBatchAttemptsDataPoint adds a data point to apachedruid.task.action.batch.attempts metric. -func (mb *MetricsBuilder) RecordApachedruidTaskActionBatchAttemptsDataPoint(ts pcommon.Timestamp, val int64, taskIntervalAttributeValue string, taskDataSourceAttributeValue string, taskActionTypeAttributeValue string) { - mb.metricApachedruidTaskActionBatchAttempts.recordDataPoint(mb.startTime, ts, val, taskIntervalAttributeValue, taskDataSourceAttributeValue, taskActionTypeAttributeValue) -} - -// RecordApachedruidTaskActionBatchQueueTimeDataPoint adds a data point to apachedruid.task.action.batch.queue_time metric. -func (mb *MetricsBuilder) RecordApachedruidTaskActionBatchQueueTimeDataPoint(ts pcommon.Timestamp, val int64, taskIntervalAttributeValue string, taskDataSourceAttributeValue string, taskActionTypeAttributeValue string) { - mb.metricApachedruidTaskActionBatchQueueTime.recordDataPoint(mb.startTime, ts, val, taskIntervalAttributeValue, taskDataSourceAttributeValue, taskActionTypeAttributeValue) -} - -// RecordApachedruidTaskActionBatchRunTimeDataPoint adds a data point to apachedruid.task.action.batch.run_time metric. -func (mb *MetricsBuilder) RecordApachedruidTaskActionBatchRunTimeDataPoint(ts pcommon.Timestamp, val int64, taskIntervalAttributeValue string, taskDataSourceAttributeValue string, taskActionTypeAttributeValue string) { - mb.metricApachedruidTaskActionBatchRunTime.recordDataPoint(mb.startTime, ts, val, taskIntervalAttributeValue, taskDataSourceAttributeValue, taskActionTypeAttributeValue) -} - -// RecordApachedruidTaskActionBatchSizeDataPoint adds a data point to apachedruid.task.action.batch.size metric. -func (mb *MetricsBuilder) RecordApachedruidTaskActionBatchSizeDataPoint(ts pcommon.Timestamp, val int64, taskIntervalAttributeValue string, taskDataSourceAttributeValue string, taskActionTypeAttributeValue string) { - mb.metricApachedruidTaskActionBatchSize.recordDataPoint(mb.startTime, ts, val, taskIntervalAttributeValue, taskDataSourceAttributeValue, taskActionTypeAttributeValue) -} - -// RecordApachedruidTaskActionFailedCountDataPoint adds a data point to apachedruid.task.action.failed.count metric. 
-func (mb *MetricsBuilder) RecordApachedruidTaskActionFailedCountDataPoint(ts pcommon.Timestamp, val int64, taskTypeAttributeValue string, taskDataSourceAttributeValue string, taskActionTypeAttributeValue string, taskGroupIDAttributeValue string, taskTagsAttributeValue string, taskIDAttributeValue string) { - mb.metricApachedruidTaskActionFailedCount.recordDataPoint(mb.startTime, ts, val, taskTypeAttributeValue, taskDataSourceAttributeValue, taskActionTypeAttributeValue, taskGroupIDAttributeValue, taskTagsAttributeValue, taskIDAttributeValue) -} - -// RecordApachedruidTaskActionLogTimeDataPoint adds a data point to apachedruid.task.action.log.time metric. -func (mb *MetricsBuilder) RecordApachedruidTaskActionLogTimeDataPoint(ts pcommon.Timestamp, val int64, taskTypeAttributeValue string, taskDataSourceAttributeValue string, taskActionTypeAttributeValue string, taskGroupIDAttributeValue string, taskTagsAttributeValue string, taskIDAttributeValue string) { - mb.metricApachedruidTaskActionLogTime.recordDataPoint(mb.startTime, ts, val, taskTypeAttributeValue, taskDataSourceAttributeValue, taskActionTypeAttributeValue, taskGroupIDAttributeValue, taskTagsAttributeValue, taskIDAttributeValue) -} - -// RecordApachedruidTaskActionRunTimeDataPoint adds a data point to apachedruid.task.action.run.time metric. -func (mb *MetricsBuilder) RecordApachedruidTaskActionRunTimeDataPoint(ts pcommon.Timestamp, val int64, taskTypeAttributeValue string, taskDataSourceAttributeValue string, taskActionTypeAttributeValue string, taskGroupIDAttributeValue string, taskTagsAttributeValue string, taskIDAttributeValue string) { - mb.metricApachedruidTaskActionRunTime.recordDataPoint(mb.startTime, ts, val, taskTypeAttributeValue, taskDataSourceAttributeValue, taskActionTypeAttributeValue, taskGroupIDAttributeValue, taskTagsAttributeValue, taskIDAttributeValue) -} - -// RecordApachedruidTaskActionSuccessCountDataPoint adds a data point to apachedruid.task.action.success.count metric. -func (mb *MetricsBuilder) RecordApachedruidTaskActionSuccessCountDataPoint(ts pcommon.Timestamp, val int64, taskTypeAttributeValue string, taskDataSourceAttributeValue string, taskActionTypeAttributeValue string, taskGroupIDAttributeValue string, taskTagsAttributeValue string, taskIDAttributeValue string) { - mb.metricApachedruidTaskActionSuccessCount.recordDataPoint(mb.startTime, ts, val, taskTypeAttributeValue, taskDataSourceAttributeValue, taskActionTypeAttributeValue, taskGroupIDAttributeValue, taskTagsAttributeValue, taskIDAttributeValue) -} - -// RecordApachedruidTaskFailedCountDataPoint adds a data point to apachedruid.task.failed.count metric. -func (mb *MetricsBuilder) RecordApachedruidTaskFailedCountDataPoint(ts pcommon.Timestamp, val int64, taskDataSourceAttributeValue string) { - mb.metricApachedruidTaskFailedCount.recordDataPoint(mb.startTime, ts, val, taskDataSourceAttributeValue) -} - -// RecordApachedruidTaskPendingCountDataPoint adds a data point to apachedruid.task.pending.count metric. -func (mb *MetricsBuilder) RecordApachedruidTaskPendingCountDataPoint(ts pcommon.Timestamp, val int64, taskDataSourceAttributeValue string) { - mb.metricApachedruidTaskPendingCount.recordDataPoint(mb.startTime, ts, val, taskDataSourceAttributeValue) -} - -// RecordApachedruidTaskPendingTimeDataPoint adds a data point to apachedruid.task.pending.time metric. 
-func (mb *MetricsBuilder) RecordApachedruidTaskPendingTimeDataPoint(ts pcommon.Timestamp, val int64, taskTypeAttributeValue string, taskDataSourceAttributeValue string, taskGroupIDAttributeValue string, taskTagsAttributeValue string, taskIDAttributeValue string) { - mb.metricApachedruidTaskPendingTime.recordDataPoint(mb.startTime, ts, val, taskTypeAttributeValue, taskDataSourceAttributeValue, taskGroupIDAttributeValue, taskTagsAttributeValue, taskIDAttributeValue) -} - -// RecordApachedruidTaskRunTimeDataPoint adds a data point to apachedruid.task.run.time metric. -func (mb *MetricsBuilder) RecordApachedruidTaskRunTimeDataPoint(ts pcommon.Timestamp, val int64, taskTypeAttributeValue string, taskDataSourceAttributeValue string, taskGroupIDAttributeValue string, taskStatusAttributeValue string, taskTagsAttributeValue string, taskIDAttributeValue string) { - mb.metricApachedruidTaskRunTime.recordDataPoint(mb.startTime, ts, val, taskTypeAttributeValue, taskDataSourceAttributeValue, taskGroupIDAttributeValue, taskStatusAttributeValue, taskTagsAttributeValue, taskIDAttributeValue) -} - -// RecordApachedruidTaskRunningCountDataPoint adds a data point to apachedruid.task.running.count metric. -func (mb *MetricsBuilder) RecordApachedruidTaskRunningCountDataPoint(ts pcommon.Timestamp, val int64, taskDataSourceAttributeValue string) { - mb.metricApachedruidTaskRunningCount.recordDataPoint(mb.startTime, ts, val, taskDataSourceAttributeValue) -} - -// RecordApachedruidTaskSegmentAvailabilityWaitTimeDataPoint adds a data point to apachedruid.task.segment_availability.wait.time metric. -func (mb *MetricsBuilder) RecordApachedruidTaskSegmentAvailabilityWaitTimeDataPoint(ts pcommon.Timestamp, val int64, taskTypeAttributeValue string, taskDataSourceAttributeValue string, taskGroupIDAttributeValue string, taskSegmentAvailabilityConfirmedAttributeValue string, taskTagsAttributeValue string, taskIDAttributeValue string) { - mb.metricApachedruidTaskSegmentAvailabilityWaitTime.recordDataPoint(mb.startTime, ts, val, taskTypeAttributeValue, taskDataSourceAttributeValue, taskGroupIDAttributeValue, taskSegmentAvailabilityConfirmedAttributeValue, taskTagsAttributeValue, taskIDAttributeValue) -} - -// RecordApachedruidTaskSuccessCountDataPoint adds a data point to apachedruid.task.success.count metric. -func (mb *MetricsBuilder) RecordApachedruidTaskSuccessCountDataPoint(ts pcommon.Timestamp, val int64, taskDataSourceAttributeValue string) { - mb.metricApachedruidTaskSuccessCount.recordDataPoint(mb.startTime, ts, val, taskDataSourceAttributeValue) -} - -// RecordApachedruidTaskWaitingCountDataPoint adds a data point to apachedruid.task.waiting.count metric. -func (mb *MetricsBuilder) RecordApachedruidTaskWaitingCountDataPoint(ts pcommon.Timestamp, val int64, taskDataSourceAttributeValue string) { - mb.metricApachedruidTaskWaitingCount.recordDataPoint(mb.startTime, ts, val, taskDataSourceAttributeValue) -} - -// RecordApachedruidTaskSlotBlacklistedCountDataPoint adds a data point to apachedruid.task_slot.blacklisted.count metric. -func (mb *MetricsBuilder) RecordApachedruidTaskSlotBlacklistedCountDataPoint(ts pcommon.Timestamp, val int64, taskSlotCategoryAttributeValue string) { - mb.metricApachedruidTaskSlotBlacklistedCount.recordDataPoint(mb.startTime, ts, val, taskSlotCategoryAttributeValue) -} - -// RecordApachedruidTaskSlotIdleCountDataPoint adds a data point to apachedruid.task_slot.idle.count metric. 
-func (mb *MetricsBuilder) RecordApachedruidTaskSlotIdleCountDataPoint(ts pcommon.Timestamp, val int64, taskSlotCategoryAttributeValue string) { - mb.metricApachedruidTaskSlotIdleCount.recordDataPoint(mb.startTime, ts, val, taskSlotCategoryAttributeValue) -} - -// RecordApachedruidTaskSlotLazyCountDataPoint adds a data point to apachedruid.task_slot.lazy.count metric. -func (mb *MetricsBuilder) RecordApachedruidTaskSlotLazyCountDataPoint(ts pcommon.Timestamp, val int64, taskSlotCategoryAttributeValue string) { - mb.metricApachedruidTaskSlotLazyCount.recordDataPoint(mb.startTime, ts, val, taskSlotCategoryAttributeValue) -} - -// RecordApachedruidTaskSlotTotalCountDataPoint adds a data point to apachedruid.task_slot.total.count metric. -func (mb *MetricsBuilder) RecordApachedruidTaskSlotTotalCountDataPoint(ts pcommon.Timestamp, val int64, taskSlotCategoryAttributeValue string) { - mb.metricApachedruidTaskSlotTotalCount.recordDataPoint(mb.startTime, ts, val, taskSlotCategoryAttributeValue) -} - -// RecordApachedruidTaskSlotUsedCountDataPoint adds a data point to apachedruid.task_slot.used.count metric. -func (mb *MetricsBuilder) RecordApachedruidTaskSlotUsedCountDataPoint(ts pcommon.Timestamp, val int64, taskSlotCategoryAttributeValue string) { - mb.metricApachedruidTaskSlotUsedCount.recordDataPoint(mb.startTime, ts, val, taskSlotCategoryAttributeValue) -} - -// RecordApachedruidTierHistoricalCountDataPoint adds a data point to apachedruid.tier.historical.count metric. -func (mb *MetricsBuilder) RecordApachedruidTierHistoricalCountDataPoint(ts pcommon.Timestamp, val int64, tierAttributeValue string) { - mb.metricApachedruidTierHistoricalCount.recordDataPoint(mb.startTime, ts, val, tierAttributeValue) -} - -// RecordApachedruidTierReplicationFactorDataPoint adds a data point to apachedruid.tier.replication.factor metric. -func (mb *MetricsBuilder) RecordApachedruidTierReplicationFactorDataPoint(ts pcommon.Timestamp, val int64, tierAttributeValue string) { - mb.metricApachedruidTierReplicationFactor.recordDataPoint(mb.startTime, ts, val, tierAttributeValue) -} - -// RecordApachedruidTierRequiredCapacityDataPoint adds a data point to apachedruid.tier.required.capacity metric. -func (mb *MetricsBuilder) RecordApachedruidTierRequiredCapacityDataPoint(ts pcommon.Timestamp, val int64, tierAttributeValue string) { - mb.metricApachedruidTierRequiredCapacity.recordDataPoint(mb.startTime, ts, val, tierAttributeValue) -} - -// RecordApachedruidTierTotalCapacityDataPoint adds a data point to apachedruid.tier.total.capacity metric. -func (mb *MetricsBuilder) RecordApachedruidTierTotalCapacityDataPoint(ts pcommon.Timestamp, val int64, tierAttributeValue string) { - mb.metricApachedruidTierTotalCapacity.recordDataPoint(mb.startTime, ts, val, tierAttributeValue) -} - -// RecordApachedruidWorkerTaskFailedCountDataPoint adds a data point to apachedruid.worker.task.failed.count metric. -func (mb *MetricsBuilder) RecordApachedruidWorkerTaskFailedCountDataPoint(ts pcommon.Timestamp, val int64, workerCategoryAttributeValue string, workerVersionAttributeValue string) { - mb.metricApachedruidWorkerTaskFailedCount.recordDataPoint(mb.startTime, ts, val, workerCategoryAttributeValue, workerVersionAttributeValue) -} - -// RecordApachedruidWorkerTaskSuccessCountDataPoint adds a data point to apachedruid.worker.task.success.count metric. 
-func (mb *MetricsBuilder) RecordApachedruidWorkerTaskSuccessCountDataPoint(ts pcommon.Timestamp, val int64, workerCategoryAttributeValue string, workerVersionAttributeValue string) {
-	mb.metricApachedruidWorkerTaskSuccessCount.recordDataPoint(mb.startTime, ts, val, workerCategoryAttributeValue, workerVersionAttributeValue)
-}
-
-// RecordApachedruidWorkerTaskSlotIdleCountDataPoint adds a data point to apachedruid.worker.task_slot.idle.count metric.
-func (mb *MetricsBuilder) RecordApachedruidWorkerTaskSlotIdleCountDataPoint(ts pcommon.Timestamp, val int64, workerCategoryAttributeValue string, workerVersionAttributeValue string) {
-	mb.metricApachedruidWorkerTaskSlotIdleCount.recordDataPoint(mb.startTime, ts, val, workerCategoryAttributeValue, workerVersionAttributeValue)
-}
-
-// RecordApachedruidWorkerTaskSlotTotalCountDataPoint adds a data point to apachedruid.worker.task_slot.total.count metric.
-func (mb *MetricsBuilder) RecordApachedruidWorkerTaskSlotTotalCountDataPoint(ts pcommon.Timestamp, val int64, workerCategoryAttributeValue string, workerVersionAttributeValue string) {
-	mb.metricApachedruidWorkerTaskSlotTotalCount.recordDataPoint(mb.startTime, ts, val, workerCategoryAttributeValue, workerVersionAttributeValue)
-}
-
-// RecordApachedruidWorkerTaskSlotUsedCountDataPoint adds a data point to apachedruid.worker.task_slot.used.count metric.
-func (mb *MetricsBuilder) RecordApachedruidWorkerTaskSlotUsedCountDataPoint(ts pcommon.Timestamp, val int64, workerCategoryAttributeValue string, workerVersionAttributeValue string) {
-	mb.metricApachedruidWorkerTaskSlotUsedCount.recordDataPoint(mb.startTime, ts, val, workerCategoryAttributeValue, workerVersionAttributeValue)
-}
-
-// RecordApachedruidZkConnectedDataPoint adds a data point to apachedruid.zk.connected metric.
-func (mb *MetricsBuilder) RecordApachedruidZkConnectedDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidZkConnected.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordApachedruidZkReconnectTimeDataPoint adds a data point to apachedruid.zk.reconnect.time metric.
-func (mb *MetricsBuilder) RecordApachedruidZkReconnectTimeDataPoint(ts pcommon.Timestamp, val int64) {
-	mb.metricApachedruidZkReconnectTime.recordDataPoint(mb.startTime, ts, val)
-}
-
-// Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted,
-// and metrics builder should update its startTime and reset it's internal state accordingly.
-func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) {
-	mb.startTime = pcommon.NewTimestampFromTime(time.Now())
-	for _, op := range options {
-		op(mb)
-	}
-}
diff --git a/receiver/apachedruidreceiver/internal/metadata/generated_metrics_test.go b/receiver/apachedruidreceiver/internal/metadata/generated_metrics_test.go
deleted file mode 100644
index 35b53657c8667..0000000000000
--- a/receiver/apachedruidreceiver/internal/metadata/generated_metrics_test.go
+++ /dev/null
@@ -1,5541 +0,0 @@
-// Code generated by mdatagen. DO NOT EDIT.
- -package metadata - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/pdata/pcommon" - "go.opentelemetry.io/collector/pdata/pmetric" - "go.opentelemetry.io/collector/receiver/receivertest" - "go.uber.org/zap" - "go.uber.org/zap/zaptest/observer" -) - -type testConfigCollection int - -const ( - testSetDefault testConfigCollection = iota - testSetAll - testSetNone -) - -func TestMetricsBuilder(t *testing.T) { - tests := []struct { - name string - configSet testConfigCollection - }{ - { - name: "default", - configSet: testSetDefault, - }, - { - name: "all_set", - configSet: testSetAll, - }, - { - name: "none_set", - configSet: testSetNone, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - start := pcommon.Timestamp(1_000_000_000) - ts := pcommon.Timestamp(1_000_001_000) - observedZapCore, observedLogs := observer.New(zap.WarnLevel) - settings := receivertest.NewNopCreateSettings() - settings.Logger = zap.New(observedZapCore) - mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, test.name), settings, WithStartTime(start)) - - expectedWarnings := 0 - - assert.Equal(t, expectedWarnings, observedLogs.Len()) - - defaultMetricsCount := 0 - allMetricsCount := 0 - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidCompactSegmentAnalyzerFetchAndProcessMillisDataPoint(ts, 1, "compact_task_type-val", "compact_data_source-val", "compact_group_id-val", "compact_tags-val", "compact_task_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidCompactTaskCountDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidCompactTaskAvailableSlotCountDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidCompactTaskMaxSlotCountDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidCoordinatorGlobalTimeDataPoint(ts, 1, "coordinator_duty_group-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidCoordinatorTimeDataPoint(ts, 1, "coordinator_duty-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestBytesReceivedDataPoint(ts, 1, "ingest_task_type-val", "ingest_task_id-val", "ingest_data_source-val", "ingest_service_name-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestCountDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val", "ingest_task_ingestion_mode-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestEventsBufferedDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_service_name-val", "ingest_buffer_capacity-val", "ingest_task_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestEventsDuplicateDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestEventsMessageGapDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestEventsProcessedDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestEventsProcessedWithErrorDataPoint(ts, 1, 
"ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestEventsThrownAwayDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestEventsUnparseableDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestHandoffCountDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestHandoffFailedDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestHandoffTimeDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestInputBytesDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestKafkaAvgLagDataPoint(ts, 1, "ingest_tags-val", "ingest_stream-val", "ingest_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestKafkaLagDataPoint(ts, 1, "ingest_tags-val", "ingest_stream-val", "ingest_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestKafkaMaxLagDataPoint(ts, 1, "ingest_tags-val", "ingest_stream-val", "ingest_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestKafkaPartitionLagDataPoint(ts, 1, "ingest_tags-val", "ingest_partition-val", "ingest_stream-val", "ingest_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestKinesisAvgLagTimeDataPoint(ts, 1, "ingest_tags-val", "ingest_stream-val", "ingest_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestKinesisLagTimeDataPoint(ts, 1, "ingest_tags-val", "ingest_stream-val", "ingest_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestKinesisMaxLagTimeDataPoint(ts, 1, "ingest_tags-val", "ingest_stream-val", "ingest_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestKinesisPartitionLagTimeDataPoint(ts, 1, "ingest_tags-val", "ingest_partition-val", "ingest_stream-val", "ingest_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestMergeCPUDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestMergeTimeDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestNoticesQueueSizeDataPoint(ts, 1, "ingest_tags-val", "ingest_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestNoticesTimeDataPoint(ts, 1, "ingest_tags-val", 
"ingest_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestPauseTimeDataPoint(ts, 1, "ingest_tags-val", "ingest_task_id-val", "ingest_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestPersistsBackPressureDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestPersistsCountDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestPersistsCPUDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestPersistsFailedDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestPersistsTimeDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestRowsOutputDataPoint(ts, 1, "ingest_task_type-val", "ingest_task_id-val", "ingest_data_source-val", "ingest_group_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestSegmentsCountDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val", "ingest_task_ingestion_mode-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestShuffleBytesDataPoint(ts, 1, "ingest_supervisor_task_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestShuffleRequestsDataPoint(ts, 1, "ingest_supervisor_task_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestSinkCountDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIngestTombstonesCountDataPoint(ts, 1, "ingest_task_type-val", "ingest_data_source-val", "ingest_group_id-val", "ingest_tags-val", "ingest_task_id-val", "ingest_task_ingestion_mode-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIntervalCompactedCountDataPoint(ts, 1, "interval_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIntervalSkipCompactCountDataPoint(ts, 1, "interval_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidIntervalWaitCompactCountDataPoint(ts, 1, "interval_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidJettyNumOpenConnectionsDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidJettyThreadPoolBusyDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidJettyThreadPoolIdleDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidJettyThreadPoolIsLowOnThreadsDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidJettyThreadPoolMaxDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidJettyThreadPoolMinDataPoint(ts, 1) - - defaultMetricsCount++ - 
allMetricsCount++ - mb.RecordApachedruidJettyThreadPoolQueueSizeDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidJettyThreadPoolTotalDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidJvmBufferpoolCapacityDataPoint(ts, 1, "jvm_bufferpool_name-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidJvmBufferpoolCountDataPoint(ts, 1, "jvm_bufferpool_name-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidJvmBufferpoolUsedDataPoint(ts, 1, "jvm_bufferpool_name-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidJvmGcCountDataPoint(ts, 1, "jvm_gc_gen-val", "jvm_gc_name-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidJvmGcCPUDataPoint(ts, 1, "jvm_gc_gen-val", "jvm_gc_name-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidJvmMemCommittedDataPoint(ts, 1, "jvm_mem_kind-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidJvmMemInitDataPoint(ts, 1, "jvm_mem_kind-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidJvmMemMaxDataPoint(ts, 1, "jvm_mem_kind-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidJvmMemUsedDataPoint(ts, 1, "jvm_mem_kind-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidJvmPoolCommittedDataPoint(ts, 1, "jvm_pool_name-val", "jvm_pool_kind-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidJvmPoolInitDataPoint(ts, 1, "jvm_pool_name-val", "jvm_pool_kind-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidJvmPoolMaxDataPoint(ts, 1, "jvm_pool_name-val", "jvm_pool_kind-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidJvmPoolUsedDataPoint(ts, 1, "jvm_pool_name-val", "jvm_pool_kind-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidKillPendingSegmentsCountDataPoint(ts, 1, "kill_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidKillTaskCountDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidKillTaskAvailableSlotCountDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidKillTaskMaxSlotCountDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidMergeBufferPendingRequestsDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidMetadataKillAuditCountDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidMetadataKillCompactionCountDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidMetadataKillDatasourceCountDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidMetadataKillRuleCountDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidMetadataKillSupervisorCountDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidMetadatacacheInitTimeDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidMetadatacacheRefreshCountDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidMetadatacacheRefreshTimeDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryByteLimitExceededCountDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryBytesDataPoint(ts, 1, "query_data_source-val", "query_num_metrics-val", 
"query_dimension-val", "query_has_filters-val", 15, 25, "query_type-val", "query_remote_address-val", "query_id-val", "query_context-val", "query_num_dimensions-val", "query_interval-val", "query_duration-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryCacheDeltaAverageBytesDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryCacheDeltaErrorsDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryCacheDeltaEvictionsDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryCacheDeltaHitRateDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryCacheDeltaHitsDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryCacheDeltaMissesDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryCacheDeltaNumEntriesDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryCacheDeltaPutErrorDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryCacheDeltaPutOkDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryCacheDeltaPutOversizedDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryCacheDeltaSizeBytesDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryCacheDeltaTimeoutsDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryCacheMemcachedDeltaDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryCacheMemcachedTotalDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryCacheTotalAverageBytesDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryCacheTotalErrorsDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryCacheTotalEvictionsDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryCacheTotalHitRateDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryCacheTotalHitsDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryCacheTotalMissesDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryCacheTotalNumEntriesDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryCacheTotalPutErrorDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryCacheTotalPutOkDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryCacheTotalPutOversizedDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryCacheTotalSizeBytesDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryCacheTotalTimeoutsDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryCountDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryCPUTimeDataPoint(ts, 1, "query_data_source-val", "query_num_metrics-val", "query_dimension-val", "query_has_filters-val", 15, 25, "query_type-val", "query_remote_address-val", "query_id-val", "query_context-val", "query_num_dimensions-val", "query_interval-val", "query_duration-val") - - defaultMetricsCount++ - allMetricsCount++ - 
mb.RecordApachedruidQueryFailedCountDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryInterruptedCountDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryNodeBackpressureDataPoint(ts, 1, "query_status-val", "query_server-val", "query_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryNodeBytesDataPoint(ts, 1, "query_status-val", "query_server-val", "query_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryNodeTimeDataPoint(ts, 1, "query_status-val", "query_server-val", "query_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryNodeTtfbDataPoint(ts, 1, "query_status-val", "query_server-val", "query_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryPriorityDataPoint(ts, 1, "query_type-val", "query_data_source-val", "query_lane-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryRowLimitExceededCountDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQuerySegmentTimeDataPoint(ts, 1, "query_status-val", "query_segment-val", "query_id-val", "query_vectorized-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQuerySegmentAndCacheTimeDataPoint(ts, 1, "query_segment-val", "query_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQuerySegmentsCountDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQuerySuccessCountDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryTimeDataPoint(ts, 1, "query_data_source-val", "query_num_metrics-val", "query_dimension-val", "query_has_filters-val", 15, 25, "query_type-val", "query_remote_address-val", "query_id-val", "query_context-val", "query_num_dimensions-val", "query_interval-val", "query_duration-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryTimeoutCountDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidQueryWaitTimeDataPoint(ts, 1, "query_segment-val", "query_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentAddedBytesDataPoint(ts, 1, "segment_task_type-val", "segment_data_source-val", "segment_group_id-val", "segment_tags-val", "segment_task_id-val", "segment_interval-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentAssignSkippedCountDataPoint(ts, 1, "segment_description-val", "segment_tier-val", "segment_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentAssignedCountDataPoint(ts, 1, "segment_tier-val", "segment_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentCompactedBytesDataPoint(ts, 1, "segment_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentCompactedCountDataPoint(ts, 1, "segment_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentCountDataPoint(ts, 1, "segment_priority-val", "segment_tier-val", "segment_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentDeletedCountDataPoint(ts, 1, "segment_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentDropQueueCountDataPoint(ts, 1, "segment_server-val") - - defaultMetricsCount++ - allMetricsCount++ - 
mb.RecordApachedruidSegmentDropSkippedCountDataPoint(ts, 1, "segment_description-val", "segment_tier-val", "segment_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentDroppedCountDataPoint(ts, 1, "segment_tier-val", "segment_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentLoadQueueAssignedDataPoint(ts, 1, "segment_server-val", "segment_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentLoadQueueCancelledDataPoint(ts, 1, "segment_server-val", "segment_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentLoadQueueCountDataPoint(ts, 1, "segment_server-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentLoadQueueFailedDataPoint(ts, 1, "segment_server-val", "segment_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentLoadQueueSizeDataPoint(ts, 1, "segment_server-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentLoadQueueSuccessDataPoint(ts, 1, "segment_server-val", "segment_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentMaxDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentMoveSkippedCountDataPoint(ts, 1, "segment_description-val", "segment_tier-val", "segment_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentMovedBytesDataPoint(ts, 1, "segment_task_type-val", "segment_data_source-val", "segment_group_id-val", "segment_tags-val", "segment_task_id-val", "segment_interval-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentMovedCountDataPoint(ts, 1, "segment_tier-val", "segment_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentNukedBytesDataPoint(ts, 1, "segment_task_type-val", "segment_data_source-val", "segment_group_id-val", "segment_tags-val", "segment_task_id-val", "segment_interval-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentOverShadowedCountDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentPendingDeleteDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentRowCountAvgDataPoint(ts, 1, "segment_priority-val", "segment_tier-val", "segment_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentRowCountRangeCountDataPoint(ts, 1, "segment_priority-val", "segment_tier-val", "segment_data_source-val", "segment_range-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentScanActiveDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentScanPendingDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentSizeDataPoint(ts, 1, "segment_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentSkipCompactBytesDataPoint(ts, 1, "segment_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentSkipCompactCountDataPoint(ts, 1, "segment_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentUnavailableCountDataPoint(ts, 1, "segment_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentUnderReplicatedCountDataPoint(ts, 1, "segment_tier-val", 
"segment_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentUnneededCountDataPoint(ts, 1, "segment_tier-val", "segment_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentUsedDataPoint(ts, 1, "segment_priority-val", "segment_tier-val", "segment_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentUsedPercentDataPoint(ts, 1, "segment_priority-val", "segment_tier-val", "segment_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentWaitCompactBytesDataPoint(ts, 1, "segment_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSegmentWaitCompactCountDataPoint(ts, 1, "segment_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidServerviewInitTimeDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidServerviewSyncHealthyDataPoint(ts, 1, "serverview_tier-val", "serverview_server-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidServerviewSyncUnstableTimeDataPoint(ts, 1, "serverview_tier-val", "serverview_server-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSQLQueryBytesDataPoint(ts, 1, "sqlQuery_data_source-val", "sqlQuery_native_query_ids-val", "sqlQuery_engine-val", "sqlQuery_remote_address-val", "sqlQuery_id-val", "sqlQuery_success-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSQLQueryPlanningTimeMsDataPoint(ts, 1, "sqlQuery_data_source-val", "sqlQuery_native_query_ids-val", "sqlQuery_engine-val", "sqlQuery_remote_address-val", "sqlQuery_id-val", "sqlQuery_success-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSQLQueryTimeDataPoint(ts, 1, "sqlQuery_data_source-val", "sqlQuery_native_query_ids-val", "sqlQuery_engine-val", "sqlQuery_remote_address-val", "sqlQuery_id-val", "sqlQuery_success-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSubqueryByteLimitCountDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSubqueryFallbackCountDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSubqueryFallbackInsufficientTypeCountDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSubqueryFallbackUnknownReasonCountDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSubqueryRowLimitCountDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysCPUDataPoint(ts, 1, "sys_cpu_time-val", "sys_cpu_name-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysDiskQueueDataPoint(ts, 1, "sys_disk_name-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysDiskReadCountDataPoint(ts, 1, "sys_disk_name-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysDiskReadSizeDataPoint(ts, 1, "sys_disk_name-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysDiskTransferTimeDataPoint(ts, 1, "sys_disk_name-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysDiskWriteCountDataPoint(ts, 1, "sys_disk_name-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysDiskWriteSizeDataPoint(ts, 1, "sys_disk_name-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysFsFilesCountDataPoint(ts, 1, "sys_fs_dir_name-val", "sys_fs_dev_name-val") - - 
defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysFsFilesFreeDataPoint(ts, 1, "sys_fs_dir_name-val", "sys_fs_dev_name-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysFsMaxDataPoint(ts, 1, "sys_fs_dir_name-val", "sys_fs_dev_name-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysFsUsedDataPoint(ts, 1, "sys_fs_dir_name-val", "sys_fs_dev_name-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysLa1DataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysLa15DataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysLa5DataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysMemFreeDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysMemMaxDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysMemUsedDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysNetReadDroppedDataPoint(ts, 1, "sys_net_hwaddr-val", "sys_net_name-val", "sys_net_address-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysNetReadErrorsDataPoint(ts, 1, "sys_net_hwaddr-val", "sys_net_name-val", "sys_net_address-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysNetReadPacketsDataPoint(ts, 1, "sys_net_hwaddr-val", "sys_net_name-val", "sys_net_address-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysNetReadSizeDataPoint(ts, 1, "sys_net_hwaddr-val", "sys_net_name-val", "sys_net_address-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysNetWriteCollisionsDataPoint(ts, 1, "sys_net_hwaddr-val", "sys_net_name-val", "sys_net_address-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysNetWriteErrorsDataPoint(ts, 1, "sys_net_hwaddr-val", "sys_net_name-val", "sys_net_address-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysNetWritePacketsDataPoint(ts, 1, "sys_net_hwaddr-val", "sys_net_name-val", "sys_net_address-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysNetWriteSizeDataPoint(ts, 1, "sys_net_hwaddr-val", "sys_net_name-val", "sys_net_address-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysStorageUsedDataPoint(ts, 1, "sys_fs_dir_name-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysSwapFreeDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysSwapMaxDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysSwapPageInDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysSwapPageOutDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysTcpv4ActiveOpensDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysTcpv4AttemptFailsDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysTcpv4EstabResetsDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysTcpv4InErrsDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysTcpv4InSegsDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysTcpv4OutRstsDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysTcpv4OutSegsDataPoint(ts, 1) - - 
defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysTcpv4PassiveOpensDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysTcpv4RetransSegsDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidSysUptimeDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidTaskActionBatchAttemptsDataPoint(ts, 1, "task_interval-val", "task_data_source-val", "task_action_type-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidTaskActionBatchQueueTimeDataPoint(ts, 1, "task_interval-val", "task_data_source-val", "task_action_type-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidTaskActionBatchRunTimeDataPoint(ts, 1, "task_interval-val", "task_data_source-val", "task_action_type-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidTaskActionBatchSizeDataPoint(ts, 1, "task_interval-val", "task_data_source-val", "task_action_type-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidTaskActionFailedCountDataPoint(ts, 1, "task_type-val", "task_data_source-val", "task_action_type-val", "task_group_id-val", "task_tags-val", "task_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidTaskActionLogTimeDataPoint(ts, 1, "task_type-val", "task_data_source-val", "task_action_type-val", "task_group_id-val", "task_tags-val", "task_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidTaskActionRunTimeDataPoint(ts, 1, "task_type-val", "task_data_source-val", "task_action_type-val", "task_group_id-val", "task_tags-val", "task_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidTaskActionSuccessCountDataPoint(ts, 1, "task_type-val", "task_data_source-val", "task_action_type-val", "task_group_id-val", "task_tags-val", "task_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidTaskFailedCountDataPoint(ts, 1, "task_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidTaskPendingCountDataPoint(ts, 1, "task_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidTaskPendingTimeDataPoint(ts, 1, "task_type-val", "task_data_source-val", "task_group_id-val", "task_tags-val", "task_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidTaskRunTimeDataPoint(ts, 1, "task_type-val", "task_data_source-val", "task_group_id-val", "task_status-val", "task_tags-val", "task_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidTaskRunningCountDataPoint(ts, 1, "task_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidTaskSegmentAvailabilityWaitTimeDataPoint(ts, 1, "task_type-val", "task_data_source-val", "task_group_id-val", "task_segment_availability_confirmed-val", "task_tags-val", "task_id-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidTaskSuccessCountDataPoint(ts, 1, "task_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidTaskWaitingCountDataPoint(ts, 1, "task_data_source-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidTaskSlotBlacklistedCountDataPoint(ts, 1, "taskSlot_category-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidTaskSlotIdleCountDataPoint(ts, 1, "taskSlot_category-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidTaskSlotLazyCountDataPoint(ts, 1, 
"taskSlot_category-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidTaskSlotTotalCountDataPoint(ts, 1, "taskSlot_category-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidTaskSlotUsedCountDataPoint(ts, 1, "taskSlot_category-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidTierHistoricalCountDataPoint(ts, 1, "tier-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidTierReplicationFactorDataPoint(ts, 1, "tier-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidTierRequiredCapacityDataPoint(ts, 1, "tier-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidTierTotalCapacityDataPoint(ts, 1, "tier-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidWorkerTaskFailedCountDataPoint(ts, 1, "worker_category-val", "worker_version-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidWorkerTaskSuccessCountDataPoint(ts, 1, "worker_category-val", "worker_version-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidWorkerTaskSlotIdleCountDataPoint(ts, 1, "worker_category-val", "worker_version-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidWorkerTaskSlotTotalCountDataPoint(ts, 1, "worker_category-val", "worker_version-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidWorkerTaskSlotUsedCountDataPoint(ts, 1, "worker_category-val", "worker_version-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidZkConnectedDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordApachedruidZkReconnectTimeDataPoint(ts, 1) - - rb := mb.NewResourceBuilder() - rb.SetApachedruidClusterName("apachedruid.cluster.name-val") - rb.SetApachedruidNodeHost("apachedruid.node.host-val") - rb.SetApachedruidNodeService("apachedruid.node.service-val") - res := rb.Emit() - metrics := mb.Emit(WithResource(res)) - - if test.configSet == testSetNone { - assert.Equal(t, 0, metrics.ResourceMetrics().Len()) - return - } - - assert.Equal(t, 1, metrics.ResourceMetrics().Len()) - rm := metrics.ResourceMetrics().At(0) - assert.Equal(t, res, rm.Resource()) - assert.Equal(t, 1, rm.ScopeMetrics().Len()) - ms := rm.ScopeMetrics().At(0).Metrics() - if test.configSet == testSetDefault { - assert.Equal(t, defaultMetricsCount, ms.Len()) - } - if test.configSet == testSetAll { - assert.Equal(t, allMetricsCount, ms.Len()) - } - validatedMetrics := make(map[string]bool) - for i := 0; i < ms.Len(); i++ { - switch ms.At(i).Name() { - case "apachedruid.compact.segment_analyzer.fetch_and_process_millis": - assert.False(t, validatedMetrics["apachedruid.compact.segment_analyzer.fetch_and_process_millis"], "Found a duplicate in the metrics slice: apachedruid.compact.segment_analyzer.fetch_and_process_millis") - validatedMetrics["apachedruid.compact.segment_analyzer.fetch_and_process_millis"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Time taken to fetch and process segments to infer the schema for the compaction task to run.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - 
+					assert.True(t, ok)
+					assert.EqualValues(t, "compact_task_type-val", attrVal.Str())
+					attrVal, ok = dp.Attributes().Get("data_source")
+					assert.True(t, ok)
+					assert.EqualValues(t, "compact_data_source-val", attrVal.Str())
+					attrVal, ok = dp.Attributes().Get("group_id")
+					assert.True(t, ok)
+					assert.EqualValues(t, "compact_group_id-val", attrVal.Str())
+					attrVal, ok = dp.Attributes().Get("tags")
+					assert.True(t, ok)
+					assert.EqualValues(t, "compact_tags-val", attrVal.Str())
+					attrVal, ok = dp.Attributes().Get("task_id")
+					assert.True(t, ok)
+					assert.EqualValues(t, "compact_task_id-val", attrVal.Str())
+				case "apachedruid.compact.task.count":
+					assert.False(t, validatedMetrics["apachedruid.compact.task.count"], "Found a duplicate in the metrics slice: apachedruid.compact.task.count")
+					validatedMetrics["apachedruid.compact.task.count"] = true
+					assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type())
+					assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len())
+					assert.Equal(t, "Number of tasks issued in the auto compaction run.", ms.At(i).Description())
+					assert.Equal(t, "{tasks}", ms.At(i).Unit())
+					assert.Equal(t, true, ms.At(i).Sum().IsMonotonic())
+					assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality())
+					dp := ms.At(i).Sum().DataPoints().At(0)
+					assert.Equal(t, start, dp.StartTimestamp())
+					assert.Equal(t, ts, dp.Timestamp())
+					assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
+					assert.Equal(t, int64(1), dp.IntValue())
+				case "apachedruid.compact_task.available_slot.count":
+					assert.False(t, validatedMetrics["apachedruid.compact_task.available_slot.count"], "Found a duplicate in the metrics slice: apachedruid.compact_task.available_slot.count")
+					validatedMetrics["apachedruid.compact_task.available_slot.count"] = true
+					assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
+					assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
+					assert.Equal(t, "Number of available task slots that can be used for auto compaction tasks in the auto compaction run. This is the max number of task slots minus any currently running compaction tasks.", ms.At(i).Description())
+					assert.Equal(t, "{slots}", ms.At(i).Unit())
+					dp := ms.At(i).Gauge().DataPoints().At(0)
+					assert.Equal(t, start, dp.StartTimestamp())
+					assert.Equal(t, ts, dp.Timestamp())
+					assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
+					assert.Equal(t, int64(1), dp.IntValue())
+				case "apachedruid.compact_task.max_slot.count":
+					assert.False(t, validatedMetrics["apachedruid.compact_task.max_slot.count"], "Found a duplicate in the metrics slice: apachedruid.compact_task.max_slot.count")
+					validatedMetrics["apachedruid.compact_task.max_slot.count"] = true
+					assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
+					assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
+					assert.Equal(t, "Maximum number of task slots available for auto compaction tasks in the auto compaction run.", ms.At(i).Description())
+					assert.Equal(t, "{slots}", ms.At(i).Unit())
+					dp := ms.At(i).Gauge().DataPoints().At(0)
+					assert.Equal(t, start, dp.StartTimestamp())
+					assert.Equal(t, ts, dp.Timestamp())
+					assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
+					assert.Equal(t, int64(1), dp.IntValue())
+				case "apachedruid.coordinator.global.time":
+					assert.False(t, validatedMetrics["apachedruid.coordinator.global.time"], "Found a duplicate in the metrics slice: apachedruid.coordinator.global.time")
+					validatedMetrics["apachedruid.coordinator.global.time"] = true
+					assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
+					assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
+					assert.Equal(t, "Approximate runtime of a full coordination cycle in milliseconds. The `dutyGroup` dimension indicates what type of coordination this run was. For example, Historical Management or Indexing.", ms.At(i).Description())
+					assert.Equal(t, "ms", ms.At(i).Unit())
+					dp := ms.At(i).Gauge().DataPoints().At(0)
+					assert.Equal(t, start, dp.StartTimestamp())
+					assert.Equal(t, ts, dp.Timestamp())
+					assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
+					assert.Equal(t, int64(1), dp.IntValue())
+					attrVal, ok := dp.Attributes().Get("duty_group")
+					assert.True(t, ok)
+					assert.EqualValues(t, "coordinator_duty_group-val", attrVal.Str())
+				case "apachedruid.coordinator.time":
+					assert.False(t, validatedMetrics["apachedruid.coordinator.time"], "Found a duplicate in the metrics slice: apachedruid.coordinator.time")
+					validatedMetrics["apachedruid.coordinator.time"] = true
+					assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
+					assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
+					assert.Equal(t, "Approximate Coordinator duty runtime in milliseconds.", ms.At(i).Description())
+					assert.Equal(t, "ms", ms.At(i).Unit())
+					dp := ms.At(i).Gauge().DataPoints().At(0)
+					assert.Equal(t, start, dp.StartTimestamp())
+					assert.Equal(t, ts, dp.Timestamp())
+					assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
+					assert.Equal(t, int64(1), dp.IntValue())
+					attrVal, ok := dp.Attributes().Get("duty")
+					assert.True(t, ok)
+					assert.EqualValues(t, "coordinator_duty-val", attrVal.Str())
+				case "apachedruid.ingest.bytes.received":
+					assert.False(t, validatedMetrics["apachedruid.ingest.bytes.received"], "Found a duplicate in the metrics slice: apachedruid.ingest.bytes.received")
+					validatedMetrics["apachedruid.ingest.bytes.received"] = true
+					assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
+					assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
+					assert.Equal(t, "Number of bytes received by
the `EventReceiverFirehose`.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("service_name") - assert.True(t, ok) - assert.EqualValues(t, "ingest_service_name-val", attrVal.Str()) - case "apachedruid.ingest.count": - assert.False(t, validatedMetrics["apachedruid.ingest.count"], "Found a duplicate in the metrics slice: apachedruid.ingest.count") - validatedMetrics["apachedruid.ingest.count"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Count of `1` every time an ingestion job runs (includes compaction jobs). Aggregate using dimensions.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("group_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_ingestion_mode") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_ingestion_mode-val", attrVal.Str()) - case "apachedruid.ingest.events.buffered": - assert.False(t, validatedMetrics["apachedruid.ingest.events.buffered"], "Found a duplicate in the metrics slice: apachedruid.ingest.events.buffered") - validatedMetrics["apachedruid.ingest.events.buffered"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of events queued in the `EventReceiverFirehose` buffer.", ms.At(i).Description()) - assert.Equal(t, "{events}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("service_name") - 
assert.True(t, ok) - assert.EqualValues(t, "ingest_service_name-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("buffer_capacity") - assert.True(t, ok) - assert.EqualValues(t, "ingest_buffer_capacity-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) - case "apachedruid.ingest.events.duplicate": - assert.False(t, validatedMetrics["apachedruid.ingest.events.duplicate"], "Found a duplicate in the metrics slice: apachedruid.ingest.events.duplicate") - validatedMetrics["apachedruid.ingest.events.duplicate"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of events rejected because the events are duplicated.", ms.At(i).Description()) - assert.Equal(t, "{events}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("group_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) - case "apachedruid.ingest.events.message_gap": - assert.False(t, validatedMetrics["apachedruid.ingest.events.message_gap"], "Found a duplicate in the metrics slice: apachedruid.ingest.events.message_gap") - validatedMetrics["apachedruid.ingest.events.message_gap"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Time gap in milliseconds between the latest ingested event timestamp and the current system timestamp of metrics emission. If the value is increasing but lag is low, Druid may not be receiving new data. 
This metric is reset as new tasks spawn up.", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("group_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) - case "apachedruid.ingest.events.processed": - assert.False(t, validatedMetrics["apachedruid.ingest.events.processed"], "Found a duplicate in the metrics slice: apachedruid.ingest.events.processed") - validatedMetrics["apachedruid.ingest.events.processed"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of events processed per emission period.", ms.At(i).Description()) - assert.Equal(t, "{events}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("group_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) - case "apachedruid.ingest.events.processed_with_error": - assert.False(t, validatedMetrics["apachedruid.ingest.events.processed_with_error"], "Found a duplicate in the metrics slice: apachedruid.ingest.events.processed_with_error") - validatedMetrics["apachedruid.ingest.events.processed_with_error"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of events processed with some partial errors per emission period. 
Events processed with partial errors are counted towards both this metric and `ingest/events/processed`.", ms.At(i).Description()) - assert.Equal(t, "{events}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("group_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) - case "apachedruid.ingest.events.thrown_away": - assert.False(t, validatedMetrics["apachedruid.ingest.events.thrown_away"], "Found a duplicate in the metrics slice: apachedruid.ingest.events.thrown_away") - validatedMetrics["apachedruid.ingest.events.thrown_away"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of events rejected because they are null, or filtered by `transformSpec`, or outside one of `lateMessageRejectionPeriod`, `earlyMessageRejectionPeriod`, or `windowPeriod`.", ms.At(i).Description()) - assert.Equal(t, "{events}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("group_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) - case "apachedruid.ingest.events.unparseable": - assert.False(t, validatedMetrics["apachedruid.ingest.events.unparseable"], "Found a duplicate in the metrics slice: apachedruid.ingest.events.unparseable") - validatedMetrics["apachedruid.ingest.events.unparseable"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of events rejected because the events are unparseable.", ms.At(i).Description()) - assert.Equal(t, "{events}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, 
pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("group_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) - case "apachedruid.ingest.handoff.count": - assert.False(t, validatedMetrics["apachedruid.ingest.handoff.count"], "Found a duplicate in the metrics slice: apachedruid.ingest.handoff.count") - validatedMetrics["apachedruid.ingest.handoff.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of handoffs that happened.", ms.At(i).Description()) - assert.Equal(t, "{handoffs}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("group_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) - case "apachedruid.ingest.handoff.failed": - assert.False(t, validatedMetrics["apachedruid.ingest.handoff.failed"], "Found a duplicate in the metrics slice: apachedruid.ingest.handoff.failed") - validatedMetrics["apachedruid.ingest.handoff.failed"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of handoffs that failed.", ms.At(i).Description()) - assert.Equal(t, "{handoffs}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) - attrVal, ok = 
dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("group_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) - case "apachedruid.ingest.handoff.time": - assert.False(t, validatedMetrics["apachedruid.ingest.handoff.time"], "Found a duplicate in the metrics slice: apachedruid.ingest.handoff.time") - validatedMetrics["apachedruid.ingest.handoff.time"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Total number of milliseconds taken to handoff a set of segments.", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("group_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) - case "apachedruid.ingest.input.bytes": - assert.False(t, validatedMetrics["apachedruid.ingest.input.bytes"], "Found a duplicate in the metrics slice: apachedruid.ingest.input.bytes") - validatedMetrics["apachedruid.ingest.input.bytes"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of bytes read from input sources, after decompression but prior to parsing. This covers all data read, including data that does not end up being fully processed and ingested. 
For example, this includes data that ends up being rejected for being unparseable or filtered out.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("group_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) - case "apachedruid.ingest.kafka.avg_lag": - assert.False(t, validatedMetrics["apachedruid.ingest.kafka.avg_lag"], "Found a duplicate in the metrics slice: apachedruid.ingest.kafka.avg_lag") - validatedMetrics["apachedruid.ingest.kafka.avg_lag"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Average lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers across all partitions. Minimum emission period for this metric is a minute.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("stream") - assert.True(t, ok) - assert.EqualValues(t, "ingest_stream-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - case "apachedruid.ingest.kafka.lag": - assert.False(t, validatedMetrics["apachedruid.ingest.kafka.lag"], "Found a duplicate in the metrics slice: apachedruid.ingest.kafka.lag") - validatedMetrics["apachedruid.ingest.kafka.lag"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Total lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers across all partitions. 
Minimum emission period for this metric is a minute.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("stream") - assert.True(t, ok) - assert.EqualValues(t, "ingest_stream-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - case "apachedruid.ingest.kafka.max_lag": - assert.False(t, validatedMetrics["apachedruid.ingest.kafka.max_lag"], "Found a duplicate in the metrics slice: apachedruid.ingest.kafka.max_lag") - validatedMetrics["apachedruid.ingest.kafka.max_lag"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Max lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers across all partitions. Minimum emission period for this metric is a minute.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("stream") - assert.True(t, ok) - assert.EqualValues(t, "ingest_stream-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - case "apachedruid.ingest.kafka.partition_lag": - assert.False(t, validatedMetrics["apachedruid.ingest.kafka.partition_lag"], "Found a duplicate in the metrics slice: apachedruid.ingest.kafka.partition_lag") - validatedMetrics["apachedruid.ingest.kafka.partition_lag"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Partition-wise lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers. 
Minimum emission period for this metric is a minute.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("partition") - assert.True(t, ok) - assert.EqualValues(t, "ingest_partition-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("stream") - assert.True(t, ok) - assert.EqualValues(t, "ingest_stream-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - case "apachedruid.ingest.kinesis.avg_lag.time": - assert.False(t, validatedMetrics["apachedruid.ingest.kinesis.avg_lag.time"], "Found a duplicate in the metrics slice: apachedruid.ingest.kinesis.avg_lag.time") - validatedMetrics["apachedruid.ingest.kinesis.avg_lag.time"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Average lag time in milliseconds between the current message sequence number consumed by the Kinesis indexing tasks and latest sequence number in Kinesis across all shards. Minimum emission period for this metric is a minute.", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("stream") - assert.True(t, ok) - assert.EqualValues(t, "ingest_stream-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - case "apachedruid.ingest.kinesis.lag.time": - assert.False(t, validatedMetrics["apachedruid.ingest.kinesis.lag.time"], "Found a duplicate in the metrics slice: apachedruid.ingest.kinesis.lag.time") - validatedMetrics["apachedruid.ingest.kinesis.lag.time"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Total lag time in milliseconds between the current message sequence number consumed by the Kinesis indexing tasks and latest sequence number in Kinesis across all shards. 
Minimum emission period for this metric is a minute.", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("stream") - assert.True(t, ok) - assert.EqualValues(t, "ingest_stream-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - case "apachedruid.ingest.kinesis.max_lag.time": - assert.False(t, validatedMetrics["apachedruid.ingest.kinesis.max_lag.time"], "Found a duplicate in the metrics slice: apachedruid.ingest.kinesis.max_lag.time") - validatedMetrics["apachedruid.ingest.kinesis.max_lag.time"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Max lag time in milliseconds between the current message sequence number consumed by the Kinesis indexing tasks and latest sequence number in Kinesis across all shards. Minimum emission period for this metric is a minute.", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("stream") - assert.True(t, ok) - assert.EqualValues(t, "ingest_stream-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - case "apachedruid.ingest.kinesis.partition_lag.time": - assert.False(t, validatedMetrics["apachedruid.ingest.kinesis.partition_lag.time"], "Found a duplicate in the metrics slice: apachedruid.ingest.kinesis.partition_lag.time") - validatedMetrics["apachedruid.ingest.kinesis.partition_lag.time"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Partition-wise lag time in milliseconds between the current message sequence number consumed by the Kinesis indexing tasks and latest sequence number in Kinesis. 
Minimum emission period for this metric is a minute.", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("partition") - assert.True(t, ok) - assert.EqualValues(t, "ingest_partition-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("stream") - assert.True(t, ok) - assert.EqualValues(t, "ingest_stream-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - case "apachedruid.ingest.merge.cpu": - assert.False(t, validatedMetrics["apachedruid.ingest.merge.cpu"], "Found a duplicate in the metrics slice: apachedruid.ingest.merge.cpu") - validatedMetrics["apachedruid.ingest.merge.cpu"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "CPU time in Nanoseconds spent on merging intermediate segments.", ms.At(i).Description()) - assert.Equal(t, "ns", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("group_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) - case "apachedruid.ingest.merge.time": - assert.False(t, validatedMetrics["apachedruid.ingest.merge.time"], "Found a duplicate in the metrics slice: apachedruid.ingest.merge.time") - validatedMetrics["apachedruid.ingest.merge.time"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Milliseconds spent merging intermediate segments.", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, 
"ingest_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("group_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) - case "apachedruid.ingest.notices.queue_size": - assert.False(t, validatedMetrics["apachedruid.ingest.notices.queue_size"], "Found a duplicate in the metrics slice: apachedruid.ingest.notices.queue_size") - validatedMetrics["apachedruid.ingest.notices.queue_size"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of pending notices to be processed by the coordinator.", ms.At(i).Description()) - assert.Equal(t, "{notices}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - case "apachedruid.ingest.notices.time": - assert.False(t, validatedMetrics["apachedruid.ingest.notices.time"], "Found a duplicate in the metrics slice: apachedruid.ingest.notices.time") - validatedMetrics["apachedruid.ingest.notices.time"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Milliseconds taken to process a notice by the supervisor.", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - case "apachedruid.ingest.pause.time": - assert.False(t, validatedMetrics["apachedruid.ingest.pause.time"], "Found a duplicate in the metrics slice: apachedruid.ingest.pause.time") - validatedMetrics["apachedruid.ingest.pause.time"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Milliseconds spent by a task in a paused state without ingesting.", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - 
assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - case "apachedruid.ingest.persists.back_pressure": - assert.False(t, validatedMetrics["apachedruid.ingest.persists.back_pressure"], "Found a duplicate in the metrics slice: apachedruid.ingest.persists.back_pressure") - validatedMetrics["apachedruid.ingest.persists.back_pressure"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Milliseconds spent creating persist tasks and blocking waiting for them to finish.", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("group_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) - case "apachedruid.ingest.persists.count": - assert.False(t, validatedMetrics["apachedruid.ingest.persists.count"], "Found a duplicate in the metrics slice: apachedruid.ingest.persists.count") - validatedMetrics["apachedruid.ingest.persists.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of times persist occurred.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("group_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) - case "apachedruid.ingest.persists.cpu": - assert.False(t, validatedMetrics["apachedruid.ingest.persists.cpu"], "Found a duplicate in the metrics slice: apachedruid.ingest.persists.cpu") - validatedMetrics["apachedruid.ingest.persists.cpu"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - 
assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "CPU time in nanoseconds spent on doing intermediate persist.", ms.At(i).Description()) - assert.Equal(t, "ns", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("group_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) - case "apachedruid.ingest.persists.failed": - assert.False(t, validatedMetrics["apachedruid.ingest.persists.failed"], "Found a duplicate in the metrics slice: apachedruid.ingest.persists.failed") - validatedMetrics["apachedruid.ingest.persists.failed"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of persists that failed.", ms.At(i).Description()) - assert.Equal(t, "{persists}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("group_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) - case "apachedruid.ingest.persists.time": - assert.False(t, validatedMetrics["apachedruid.ingest.persists.time"], "Found a duplicate in the metrics slice: apachedruid.ingest.persists.time") - validatedMetrics["apachedruid.ingest.persists.time"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Milliseconds spent doing intermediate persist.", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, 
dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("group_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) - case "apachedruid.ingest.rows.output": - assert.False(t, validatedMetrics["apachedruid.ingest.rows.output"], "Found a duplicate in the metrics slice: apachedruid.ingest.rows.output") - validatedMetrics["apachedruid.ingest.rows.output"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of Druid rows persisted.", ms.At(i).Description()) - assert.Equal(t, "{rows}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("group_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) - case "apachedruid.ingest.segments.count": - assert.False(t, validatedMetrics["apachedruid.ingest.segments.count"], "Found a duplicate in the metrics slice: apachedruid.ingest.segments.count") - validatedMetrics["apachedruid.ingest.segments.count"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Count of final segments created by job (includes tombstones).", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("group_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - 
assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_ingestion_mode") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_ingestion_mode-val", attrVal.Str()) - case "apachedruid.ingest.shuffle.bytes": - assert.False(t, validatedMetrics["apachedruid.ingest.shuffle.bytes"], "Found a duplicate in the metrics slice: apachedruid.ingest.shuffle.bytes") - validatedMetrics["apachedruid.ingest.shuffle.bytes"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of bytes shuffled per emission period.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("supervisor_task_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_supervisor_task_id-val", attrVal.Str()) - case "apachedruid.ingest.shuffle.requests": - assert.False(t, validatedMetrics["apachedruid.ingest.shuffle.requests"], "Found a duplicate in the metrics slice: apachedruid.ingest.shuffle.requests") - validatedMetrics["apachedruid.ingest.shuffle.requests"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of shuffle requests per emission period.", ms.At(i).Description()) - assert.Equal(t, "{requests}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("supervisor_task_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_supervisor_task_id-val", attrVal.Str()) - case "apachedruid.ingest.sink.count": - assert.False(t, validatedMetrics["apachedruid.ingest.sink.count"], "Found a duplicate in the metrics slice: apachedruid.ingest.sink.count") - validatedMetrics["apachedruid.ingest.sink.count"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of sinks not handed off.", ms.At(i).Description()) - assert.Equal(t, "{sinks}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("group_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "ingest_tags-val", 
attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) - case "apachedruid.ingest.tombstones.count": - assert.False(t, validatedMetrics["apachedruid.ingest.tombstones.count"], "Found a duplicate in the metrics slice: apachedruid.ingest.tombstones.count") - validatedMetrics["apachedruid.ingest.tombstones.count"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Count of tombstones created by job.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "ingest_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("group_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_group_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "ingest_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_ingestion_mode") - assert.True(t, ok) - assert.EqualValues(t, "ingest_task_ingestion_mode-val", attrVal.Str()) - case "apachedruid.interval.compacted.count": - assert.False(t, validatedMetrics["apachedruid.interval.compacted.count"], "Found a duplicate in the metrics slice: apachedruid.interval.compacted.count") - validatedMetrics["apachedruid.interval.compacted.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Total number of intervals of this datasource that are already compacted with the spec set in the auto compaction config.", ms.At(i).Description()) - assert.Equal(t, "{intervals}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "interval_data_source-val", attrVal.Str()) - case "apachedruid.interval.skip_compact.count": - assert.False(t, validatedMetrics["apachedruid.interval.skip_compact.count"], "Found a duplicate in the metrics slice: apachedruid.interval.skip_compact.count") - validatedMetrics["apachedruid.interval.skip_compact.count"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Total number of intervals of this datasource that are skipped (not eligible for auto compaction) by the auto compaction.", ms.At(i).Description()) - assert.Equal(t, "{intervals}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, 
dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "interval_data_source-val", attrVal.Str()) - case "apachedruid.interval.wait_compact.count": - assert.False(t, validatedMetrics["apachedruid.interval.wait_compact.count"], "Found a duplicate in the metrics slice: apachedruid.interval.wait_compact.count") - validatedMetrics["apachedruid.interval.wait_compact.count"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Total number of intervals of this datasource waiting to be compacted by the auto compaction (only consider intervals/segments that are eligible for auto compaction).", ms.At(i).Description()) - assert.Equal(t, "{intervals}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "interval_data_source-val", attrVal.Str()) - case "apachedruid.jetty.num_open_connections": - assert.False(t, validatedMetrics["apachedruid.jetty.num_open_connections"], "Found a duplicate in the metrics slice: apachedruid.jetty.num_open_connections") - validatedMetrics["apachedruid.jetty.num_open_connections"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of open jetty connections.", ms.At(i).Description()) - assert.Equal(t, "{connections}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.jetty.thread_pool.busy": - assert.False(t, validatedMetrics["apachedruid.jetty.thread_pool.busy"], "Found a duplicate in the metrics slice: apachedruid.jetty.thread_pool.busy") - validatedMetrics["apachedruid.jetty.thread_pool.busy"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of busy threads that has work to do from the worker queue.", ms.At(i).Description()) - assert.Equal(t, "{threads}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.jetty.thread_pool.idle": - assert.False(t, validatedMetrics["apachedruid.jetty.thread_pool.idle"], "Found a duplicate in the metrics slice: apachedruid.jetty.thread_pool.idle") - validatedMetrics["apachedruid.jetty.thread_pool.idle"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of idle threads.", ms.At(i).Description()) - assert.Equal(t, "{threads}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) 
- assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.jetty.thread_pool.is_low_on_threads": - assert.False(t, validatedMetrics["apachedruid.jetty.thread_pool.is_low_on_threads"], "Found a duplicate in the metrics slice: apachedruid.jetty.thread_pool.is_low_on_threads") - validatedMetrics["apachedruid.jetty.thread_pool.is_low_on_threads"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "A rough indicator of whether number of total workable threads allocated is enough to handle the works in the work queue.", ms.At(i).Description()) - assert.Equal(t, "{threads}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.jetty.thread_pool.max": - assert.False(t, validatedMetrics["apachedruid.jetty.thread_pool.max"], "Found a duplicate in the metrics slice: apachedruid.jetty.thread_pool.max") - validatedMetrics["apachedruid.jetty.thread_pool.max"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of maximum threads allocatable.", ms.At(i).Description()) - assert.Equal(t, "{threads}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.jetty.thread_pool.min": - assert.False(t, validatedMetrics["apachedruid.jetty.thread_pool.min"], "Found a duplicate in the metrics slice: apachedruid.jetty.thread_pool.min") - validatedMetrics["apachedruid.jetty.thread_pool.min"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of minimum threads allocatable.", ms.At(i).Description()) - assert.Equal(t, "{threads}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.jetty.thread_pool.queue_size": - assert.False(t, validatedMetrics["apachedruid.jetty.thread_pool.queue_size"], "Found a duplicate in the metrics slice: apachedruid.jetty.thread_pool.queue_size") - validatedMetrics["apachedruid.jetty.thread_pool.queue_size"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Size of the worker queue.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.jetty.thread_pool.total": - assert.False(t, validatedMetrics["apachedruid.jetty.thread_pool.total"], "Found a duplicate in the metrics slice: apachedruid.jetty.thread_pool.total") - validatedMetrics["apachedruid.jetty.thread_pool.total"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, 
ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of total workable threads allocated.", ms.At(i).Description()) - assert.Equal(t, "{threads}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.jvm.bufferpool.capacity": - assert.False(t, validatedMetrics["apachedruid.jvm.bufferpool.capacity"], "Found a duplicate in the metrics slice: apachedruid.jvm.bufferpool.capacity") - validatedMetrics["apachedruid.jvm.bufferpool.capacity"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Bufferpool capacity.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("bufferpool_name") - assert.True(t, ok) - assert.EqualValues(t, "jvm_bufferpool_name-val", attrVal.Str()) - case "apachedruid.jvm.bufferpool.count": - assert.False(t, validatedMetrics["apachedruid.jvm.bufferpool.count"], "Found a duplicate in the metrics slice: apachedruid.jvm.bufferpool.count") - validatedMetrics["apachedruid.jvm.bufferpool.count"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Bufferpool count.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("bufferpool_name") - assert.True(t, ok) - assert.EqualValues(t, "jvm_bufferpool_name-val", attrVal.Str()) - case "apachedruid.jvm.bufferpool.used": - assert.False(t, validatedMetrics["apachedruid.jvm.bufferpool.used"], "Found a duplicate in the metrics slice: apachedruid.jvm.bufferpool.used") - validatedMetrics["apachedruid.jvm.bufferpool.used"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Bufferpool used.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("bufferpool_name") - assert.True(t, ok) - assert.EqualValues(t, "jvm_bufferpool_name-val", attrVal.Str()) - case "apachedruid.jvm.gc.count": - assert.False(t, validatedMetrics["apachedruid.jvm.gc.count"], "Found a duplicate in the metrics slice: apachedruid.jvm.gc.count") - validatedMetrics["apachedruid.jvm.gc.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Garbage collection count.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, 
pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("gc_gen") - assert.True(t, ok) - assert.EqualValues(t, "jvm_gc_gen-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("gc_name") - assert.True(t, ok) - assert.EqualValues(t, "jvm_gc_name-val", attrVal.Str()) - case "apachedruid.jvm.gc.cpu": - assert.False(t, validatedMetrics["apachedruid.jvm.gc.cpu"], "Found a duplicate in the metrics slice: apachedruid.jvm.gc.cpu") - validatedMetrics["apachedruid.jvm.gc.cpu"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Count of CPU time in Nanoseconds spent on garbage collection. Note, `jvm/gc/cpu` represents the total time over multiple GC cycles; divide by `jvm/gc/count` to get the mean GC time per cycle.", ms.At(i).Description()) - assert.Equal(t, "ns", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("gc_gen") - assert.True(t, ok) - assert.EqualValues(t, "jvm_gc_gen-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("gc_name") - assert.True(t, ok) - assert.EqualValues(t, "jvm_gc_name-val", attrVal.Str()) - case "apachedruid.jvm.mem.committed": - assert.False(t, validatedMetrics["apachedruid.jvm.mem.committed"], "Found a duplicate in the metrics slice: apachedruid.jvm.mem.committed") - validatedMetrics["apachedruid.jvm.mem.committed"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Committed memory.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("mem_kind") - assert.True(t, ok) - assert.EqualValues(t, "jvm_mem_kind-val", attrVal.Str()) - case "apachedruid.jvm.mem.init": - assert.False(t, validatedMetrics["apachedruid.jvm.mem.init"], "Found a duplicate in the metrics slice: apachedruid.jvm.mem.init") - validatedMetrics["apachedruid.jvm.mem.init"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Initial memory.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("mem_kind") - assert.True(t, ok) - assert.EqualValues(t, "jvm_mem_kind-val", attrVal.Str()) - case "apachedruid.jvm.mem.max": - assert.False(t, 
validatedMetrics["apachedruid.jvm.mem.max"], "Found a duplicate in the metrics slice: apachedruid.jvm.mem.max") - validatedMetrics["apachedruid.jvm.mem.max"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Max memory.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("mem_kind") - assert.True(t, ok) - assert.EqualValues(t, "jvm_mem_kind-val", attrVal.Str()) - case "apachedruid.jvm.mem.used": - assert.False(t, validatedMetrics["apachedruid.jvm.mem.used"], "Found a duplicate in the metrics slice: apachedruid.jvm.mem.used") - validatedMetrics["apachedruid.jvm.mem.used"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Used memory.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("mem_kind") - assert.True(t, ok) - assert.EqualValues(t, "jvm_mem_kind-val", attrVal.Str()) - case "apachedruid.jvm.pool.committed": - assert.False(t, validatedMetrics["apachedruid.jvm.pool.committed"], "Found a duplicate in the metrics slice: apachedruid.jvm.pool.committed") - validatedMetrics["apachedruid.jvm.pool.committed"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Committed pool.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("pool_name") - assert.True(t, ok) - assert.EqualValues(t, "jvm_pool_name-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("pool_kind") - assert.True(t, ok) - assert.EqualValues(t, "jvm_pool_kind-val", attrVal.Str()) - case "apachedruid.jvm.pool.init": - assert.False(t, validatedMetrics["apachedruid.jvm.pool.init"], "Found a duplicate in the metrics slice: apachedruid.jvm.pool.init") - validatedMetrics["apachedruid.jvm.pool.init"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Initial pool.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("pool_name") - assert.True(t, ok) - assert.EqualValues(t, "jvm_pool_name-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("pool_kind") - assert.True(t, ok) - assert.EqualValues(t, "jvm_pool_kind-val", attrVal.Str()) - case "apachedruid.jvm.pool.max": - assert.False(t, 
validatedMetrics["apachedruid.jvm.pool.max"], "Found a duplicate in the metrics slice: apachedruid.jvm.pool.max") - validatedMetrics["apachedruid.jvm.pool.max"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Max pool.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("pool_name") - assert.True(t, ok) - assert.EqualValues(t, "jvm_pool_name-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("pool_kind") - assert.True(t, ok) - assert.EqualValues(t, "jvm_pool_kind-val", attrVal.Str()) - case "apachedruid.jvm.pool.used": - assert.False(t, validatedMetrics["apachedruid.jvm.pool.used"], "Found a duplicate in the metrics slice: apachedruid.jvm.pool.used") - validatedMetrics["apachedruid.jvm.pool.used"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Pool used.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("pool_name") - assert.True(t, ok) - assert.EqualValues(t, "jvm_pool_name-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("pool_kind") - assert.True(t, ok) - assert.EqualValues(t, "jvm_pool_kind-val", attrVal.Str()) - case "apachedruid.kill.pending_segments.count": - assert.False(t, validatedMetrics["apachedruid.kill.pending_segments.count"], "Found a duplicate in the metrics slice: apachedruid.kill.pending_segments.count") - validatedMetrics["apachedruid.kill.pending_segments.count"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of stale pending segments deleted from the metadata store.", ms.At(i).Description()) - assert.Equal(t, "{segments}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "kill_data_source-val", attrVal.Str()) - case "apachedruid.kill.task.count": - assert.False(t, validatedMetrics["apachedruid.kill.task.count"], "Found a duplicate in the metrics slice: apachedruid.kill.task.count") - validatedMetrics["apachedruid.kill.task.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of tasks issued in the auto kill run.", ms.At(i).Description()) - assert.Equal(t, "{tasks}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, 
pmetric.NumberDataPointValueTypeInt, dp.ValueType())
	assert.Equal(t, int64(1), dp.IntValue())
case "apachedruid.kill_task.available_slot.count":
	assert.False(t, validatedMetrics["apachedruid.kill_task.available_slot.count"], "Found a duplicate in the metrics slice: apachedruid.kill_task.available_slot.count")
	validatedMetrics["apachedruid.kill_task.available_slot.count"] = true
	assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
	assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
	assert.Equal(t, "Number of available task slots that can be used for auto kill tasks in the auto kill run. This is the max number of task slots minus any currently running auto kill tasks.", ms.At(i).Description())
	assert.Equal(t, "{slots}", ms.At(i).Unit())
	dp := ms.At(i).Gauge().DataPoints().At(0)
	assert.Equal(t, start, dp.StartTimestamp())
	assert.Equal(t, ts, dp.Timestamp())
	assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
	assert.Equal(t, int64(1), dp.IntValue())
case "apachedruid.kill_task.max_slot.count":
	assert.False(t, validatedMetrics["apachedruid.kill_task.max_slot.count"], "Found a duplicate in the metrics slice: apachedruid.kill_task.max_slot.count")
	validatedMetrics["apachedruid.kill_task.max_slot.count"] = true
	assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
	assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
	assert.Equal(t, "Maximum number of task slots available for auto kill tasks in the auto kill run.", ms.At(i).Description())
	assert.Equal(t, "{slots}", ms.At(i).Unit())
	dp := ms.At(i).Gauge().DataPoints().At(0)
	assert.Equal(t, start, dp.StartTimestamp())
	assert.Equal(t, ts, dp.Timestamp())
	assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
	assert.Equal(t, int64(1), dp.IntValue())
case "apachedruid.merge_buffer.pending_requests":
	assert.False(t, validatedMetrics["apachedruid.merge_buffer.pending_requests"], "Found a duplicate in the metrics slice: apachedruid.merge_buffer.pending_requests")
	validatedMetrics["apachedruid.merge_buffer.pending_requests"] = true
	assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
	assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
	assert.Equal(t, "Number of requests waiting to acquire a batch of buffers from the merge buffer pool.", ms.At(i).Description())
	assert.Equal(t, "{requests}", ms.At(i).Unit())
	dp := ms.At(i).Gauge().DataPoints().At(0)
	assert.Equal(t, start, dp.StartTimestamp())
	assert.Equal(t, ts, dp.Timestamp())
	assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
	assert.Equal(t, int64(1), dp.IntValue())
case "apachedruid.metadata.kill.audit.count":
	assert.False(t, validatedMetrics["apachedruid.metadata.kill.audit.count"], "Found a duplicate in the metrics slice: apachedruid.metadata.kill.audit.count")
	validatedMetrics["apachedruid.metadata.kill.audit.count"] = true
	assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type())
	assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len())
	assert.Equal(t, "Total number of audit logs that were automatically deleted from metadata store per each Coordinator kill audit duty run. This metric can help adjust `druid.coordinator.kill.audit.durationToRetain` configuration based on whether more or less audit logs need to be deleted per cycle. This metric is emitted only when `druid.coordinator.kill.audit.on` is set to true.", ms.At(i).Description())
	assert.Equal(t, "1", ms.At(i).Unit())
	assert.Equal(t, true, ms.At(i).Sum().IsMonotonic())
	assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality())
	dp := ms.At(i).Sum().DataPoints().At(0)
	assert.Equal(t, start, dp.StartTimestamp())
	assert.Equal(t, ts, dp.Timestamp())
	assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
	assert.Equal(t, int64(1), dp.IntValue())
case "apachedruid.metadata.kill.compaction.count":
	assert.False(t, validatedMetrics["apachedruid.metadata.kill.compaction.count"], "Found a duplicate in the metrics slice: apachedruid.metadata.kill.compaction.count")
	validatedMetrics["apachedruid.metadata.kill.compaction.count"] = true
	assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
	assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
	assert.Equal(t, "Total number of compaction configurations that were automatically deleted from metadata store per each Coordinator kill compaction configuration duty run. This metric is only emitted when `druid.coordinator.kill.compaction.on` is set to true.", ms.At(i).Description())
	assert.Equal(t, "1", ms.At(i).Unit())
	dp := ms.At(i).Gauge().DataPoints().At(0)
	assert.Equal(t, start, dp.StartTimestamp())
	assert.Equal(t, ts, dp.Timestamp())
	assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
	assert.Equal(t, int64(1), dp.IntValue())
case "apachedruid.metadata.kill.datasource.count":
	assert.False(t, validatedMetrics["apachedruid.metadata.kill.datasource.count"], "Found a duplicate in the metrics slice: apachedruid.metadata.kill.datasource.count")
	validatedMetrics["apachedruid.metadata.kill.datasource.count"] = true
	assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
	assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
	assert.Equal(t, "Total number of datasource metadata that were automatically deleted from metadata store per each Coordinator kill datasource duty run. Note that datasource metadata only exists for datasource created from supervisor. This metric can help adjust `druid.coordinator.kill.datasource.durationToRetain` configuration based on whether more or less datasource metadata need to be deleted per cycle. This metric is only emitted when `druid.coordinator.kill.datasource.on` is set to true.", ms.At(i).Description())
	assert.Equal(t, "1", ms.At(i).Unit())
	dp := ms.At(i).Gauge().DataPoints().At(0)
	assert.Equal(t, start, dp.StartTimestamp())
	assert.Equal(t, ts, dp.Timestamp())
	assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
	assert.Equal(t, int64(1), dp.IntValue())
case "apachedruid.metadata.kill.rule.count":
	assert.False(t, validatedMetrics["apachedruid.metadata.kill.rule.count"], "Found a duplicate in the metrics slice: apachedruid.metadata.kill.rule.count")
	validatedMetrics["apachedruid.metadata.kill.rule.count"] = true
	assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
	assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
	assert.Equal(t, "Total number of rules that were automatically deleted from metadata store per each Coordinator kill rule duty run. This metric can help adjust `druid.coordinator.kill.rule.durationToRetain` configuration based on whether more or less rules need to be deleted per cycle. 
This metric is only emitted when `druid.coordinator.kill.rule.on` is set to true.", ms.At(i).Description()) - assert.Equal(t, "{rules}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.metadata.kill.supervisor.count": - assert.False(t, validatedMetrics["apachedruid.metadata.kill.supervisor.count"], "Found a duplicate in the metrics slice: apachedruid.metadata.kill.supervisor.count") - validatedMetrics["apachedruid.metadata.kill.supervisor.count"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Total number of terminated supervisors that were automatically deleted from metadata store per each Coordinator kill supervisor duty run. This metric can help adjust `druid.coordinator.kill.supervisor.durationToRetain` configuration based on whether more or less terminated supervisors need to be deleted per cycle. This metric is only emitted when `druid.coordinator.kill.supervisor.on` is set to true.", ms.At(i).Description()) - assert.Equal(t, "{supervisors}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.metadatacache.init.time": - assert.False(t, validatedMetrics["apachedruid.metadatacache.init.time"], "Found a duplicate in the metrics slice: apachedruid.metadatacache.init.time") - validatedMetrics["apachedruid.metadatacache.init.time"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Time taken to initialize the broker segment metadata cache. 
Useful to detect if brokers are taking too long to start.", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.metadatacache.refresh.count": - assert.False(t, validatedMetrics["apachedruid.metadatacache.refresh.count"], "Found a duplicate in the metrics slice: apachedruid.metadatacache.refresh.count") - validatedMetrics["apachedruid.metadatacache.refresh.count"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of segments to refresh in broker segment metadata cache.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.metadatacache.refresh.time": - assert.False(t, validatedMetrics["apachedruid.metadatacache.refresh.time"], "Found a duplicate in the metrics slice: apachedruid.metadatacache.refresh.time") - validatedMetrics["apachedruid.metadatacache.refresh.time"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Time taken to refresh segments in broker segment metadata cache.", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.query.byte_limit.exceeded.count": - assert.False(t, validatedMetrics["apachedruid.query.byte_limit.exceeded.count"], "Found a duplicate in the metrics slice: apachedruid.query.byte_limit.exceeded.count") - validatedMetrics["apachedruid.query.byte_limit.exceeded.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of queries whose inlined subquery results exceeded the given byte limit.", ms.At(i).Description()) - assert.Equal(t, "{queries}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.query.bytes": - assert.False(t, validatedMetrics["apachedruid.query.bytes"], "Found a duplicate in the metrics slice: apachedruid.query.bytes") - validatedMetrics["apachedruid.query.bytes"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "The total number of bytes returned to the requesting client in the query response from the broker. 
Other services report the total bytes for their portion of the query.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "query_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("num_metrics") - assert.True(t, ok) - assert.EqualValues(t, "query_num_metrics-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("dimension") - assert.True(t, ok) - assert.EqualValues(t, "query_dimension-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("has_filters") - assert.True(t, ok) - assert.EqualValues(t, "query_has_filters-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("threshold") - assert.True(t, ok) - assert.EqualValues(t, 15, attrVal.Int()) - attrVal, ok = dp.Attributes().Get("num_complex_metrics") - assert.True(t, ok) - assert.EqualValues(t, 25, attrVal.Int()) - attrVal, ok = dp.Attributes().Get("type") - assert.True(t, ok) - assert.EqualValues(t, "query_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("remote_address") - assert.True(t, ok) - assert.EqualValues(t, "query_remote_address-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("id") - assert.True(t, ok) - assert.EqualValues(t, "query_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("context") - assert.True(t, ok) - assert.EqualValues(t, "query_context-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("num_dimensions") - assert.True(t, ok) - assert.EqualValues(t, "query_num_dimensions-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("interval") - assert.True(t, ok) - assert.EqualValues(t, "query_interval-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("duration") - assert.True(t, ok) - assert.EqualValues(t, "query_duration-val", attrVal.Str()) - case "apachedruid.query.cache.delta.average_bytes": - assert.False(t, validatedMetrics["apachedruid.query.cache.delta.average_bytes"], "Found a duplicate in the metrics slice: apachedruid.query.cache.delta.average_bytes") - validatedMetrics["apachedruid.query.cache.delta.average_bytes"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Average cache entry byte size.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.query.cache.delta.errors": - assert.False(t, validatedMetrics["apachedruid.query.cache.delta.errors"], "Found a duplicate in the metrics slice: apachedruid.query.cache.delta.errors") - validatedMetrics["apachedruid.query.cache.delta.errors"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of cache errors.", ms.At(i).Description()) - assert.Equal(t, "{errors}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - 
assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.query.cache.delta.evictions": - assert.False(t, validatedMetrics["apachedruid.query.cache.delta.evictions"], "Found a duplicate in the metrics slice: apachedruid.query.cache.delta.evictions") - validatedMetrics["apachedruid.query.cache.delta.evictions"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of cache evictions.", ms.At(i).Description()) - assert.Equal(t, "{evictions}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.query.cache.delta.hit_rate": - assert.False(t, validatedMetrics["apachedruid.query.cache.delta.hit_rate"], "Found a duplicate in the metrics slice: apachedruid.query.cache.delta.hit_rate") - validatedMetrics["apachedruid.query.cache.delta.hit_rate"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Cache hit rate.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.Equal(t, float64(1), dp.DoubleValue()) - case "apachedruid.query.cache.delta.hits": - assert.False(t, validatedMetrics["apachedruid.query.cache.delta.hits"], "Found a duplicate in the metrics slice: apachedruid.query.cache.delta.hits") - validatedMetrics["apachedruid.query.cache.delta.hits"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of cache hits.", ms.At(i).Description()) - assert.Equal(t, "{hits}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.query.cache.delta.misses": - assert.False(t, validatedMetrics["apachedruid.query.cache.delta.misses"], "Found a duplicate in the metrics slice: apachedruid.query.cache.delta.misses") - validatedMetrics["apachedruid.query.cache.delta.misses"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of cache misses.", ms.At(i).Description()) - assert.Equal(t, "{misses}", ms.At(i).Unit()) - assert.Equal(t, true, 
ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.query.cache.delta.num_entries": - assert.False(t, validatedMetrics["apachedruid.query.cache.delta.num_entries"], "Found a duplicate in the metrics slice: apachedruid.query.cache.delta.num_entries") - validatedMetrics["apachedruid.query.cache.delta.num_entries"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of cache entries.", ms.At(i).Description()) - assert.Equal(t, "{entries}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.query.cache.delta.put.error": - assert.False(t, validatedMetrics["apachedruid.query.cache.delta.put.error"], "Found a duplicate in the metrics slice: apachedruid.query.cache.delta.put.error") - validatedMetrics["apachedruid.query.cache.delta.put.error"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of new cache entries that could not be cached due to errors.", ms.At(i).Description()) - assert.Equal(t, "{errors}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.query.cache.delta.put.ok": - assert.False(t, validatedMetrics["apachedruid.query.cache.delta.put.ok"], "Found a duplicate in the metrics slice: apachedruid.query.cache.delta.put.ok") - validatedMetrics["apachedruid.query.cache.delta.put.ok"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of new cache entries successfully cached.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.query.cache.delta.put.oversized": - assert.False(t, validatedMetrics["apachedruid.query.cache.delta.put.oversized"], "Found a duplicate in the metrics slice: apachedruid.query.cache.delta.put.oversized") - validatedMetrics["apachedruid.query.cache.delta.put.oversized"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - 
assert.Equal(t, "Number of potential new cache entries that were skipped due to being too large (based on `druid.{broker,historical,realtime}.cache.maxEntrySize` properties).", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.query.cache.delta.size_bytes": - assert.False(t, validatedMetrics["apachedruid.query.cache.delta.size_bytes"], "Found a duplicate in the metrics slice: apachedruid.query.cache.delta.size_bytes") - validatedMetrics["apachedruid.query.cache.delta.size_bytes"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Size in bytes of cache entries.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.query.cache.delta.timeouts": - assert.False(t, validatedMetrics["apachedruid.query.cache.delta.timeouts"], "Found a duplicate in the metrics slice: apachedruid.query.cache.delta.timeouts") - validatedMetrics["apachedruid.query.cache.delta.timeouts"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of cache timeouts.", ms.At(i).Description()) - assert.Equal(t, "{timeouts}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.query.cache.memcached.delta": - assert.False(t, validatedMetrics["apachedruid.query.cache.memcached.delta"], "Found a duplicate in the metrics slice: apachedruid.query.cache.memcached.delta") - validatedMetrics["apachedruid.query.cache.memcached.delta"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Cache metrics unique to memcached (only if `druid.cache.type=memcached`) as their delta from the prior event emission.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.query.cache.memcached.total": - assert.False(t, 
validatedMetrics["apachedruid.query.cache.memcached.total"], "Found a duplicate in the metrics slice: apachedruid.query.cache.memcached.total") - validatedMetrics["apachedruid.query.cache.memcached.total"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Cache metrics unique to memcached (only if `druid.cache.type=memcached`) as their actual values.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.query.cache.total.average_bytes": - assert.False(t, validatedMetrics["apachedruid.query.cache.total.average_bytes"], "Found a duplicate in the metrics slice: apachedruid.query.cache.total.average_bytes") - validatedMetrics["apachedruid.query.cache.total.average_bytes"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Average cache entry byte size.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.query.cache.total.errors": - assert.False(t, validatedMetrics["apachedruid.query.cache.total.errors"], "Found a duplicate in the metrics slice: apachedruid.query.cache.total.errors") - validatedMetrics["apachedruid.query.cache.total.errors"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of cache errors.", ms.At(i).Description()) - assert.Equal(t, "{errors}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.query.cache.total.evictions": - assert.False(t, validatedMetrics["apachedruid.query.cache.total.evictions"], "Found a duplicate in the metrics slice: apachedruid.query.cache.total.evictions") - validatedMetrics["apachedruid.query.cache.total.evictions"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of cache evictions.", ms.At(i).Description()) - assert.Equal(t, "{evictions}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.query.cache.total.hit_rate": - assert.False(t, validatedMetrics["apachedruid.query.cache.total.hit_rate"], "Found a duplicate in the metrics slice: apachedruid.query.cache.total.hit_rate") - validatedMetrics["apachedruid.query.cache.total.hit_rate"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Cache hit rate.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := 
ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.Equal(t, float64(1), dp.DoubleValue()) - case "apachedruid.query.cache.total.hits": - assert.False(t, validatedMetrics["apachedruid.query.cache.total.hits"], "Found a duplicate in the metrics slice: apachedruid.query.cache.total.hits") - validatedMetrics["apachedruid.query.cache.total.hits"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of cache hits.", ms.At(i).Description()) - assert.Equal(t, "{hits}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.query.cache.total.misses": - assert.False(t, validatedMetrics["apachedruid.query.cache.total.misses"], "Found a duplicate in the metrics slice: apachedruid.query.cache.total.misses") - validatedMetrics["apachedruid.query.cache.total.misses"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of cache misses.", ms.At(i).Description()) - assert.Equal(t, "{misses}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.query.cache.total.num_entries": - assert.False(t, validatedMetrics["apachedruid.query.cache.total.num_entries"], "Found a duplicate in the metrics slice: apachedruid.query.cache.total.num_entries") - validatedMetrics["apachedruid.query.cache.total.num_entries"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of cache entries.", ms.At(i).Description()) - assert.Equal(t, "{entries}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.query.cache.total.put.error": - assert.False(t, validatedMetrics["apachedruid.query.cache.total.put.error"], "Found a duplicate in the metrics slice: apachedruid.query.cache.total.put.error") - validatedMetrics["apachedruid.query.cache.total.put.error"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of new cache entries that could not be cached due to errors.", ms.At(i).Description()) - assert.Equal(t, "{errors}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.query.cache.total.put.ok": - assert.False(t, validatedMetrics["apachedruid.query.cache.total.put.ok"], "Found a duplicate in the metrics slice: apachedruid.query.cache.total.put.ok") - validatedMetrics["apachedruid.query.cache.total.put.ok"] = 
true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of new cache entries successfully cached.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.query.cache.total.put.oversized": - assert.False(t, validatedMetrics["apachedruid.query.cache.total.put.oversized"], "Found a duplicate in the metrics slice: apachedruid.query.cache.total.put.oversized") - validatedMetrics["apachedruid.query.cache.total.put.oversized"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of potential new cache entries that were skipped due to being too large (based on `druid.{broker,historical,realtime}.cache.maxEntrySize` properties).", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.query.cache.total.size_bytes": - assert.False(t, validatedMetrics["apachedruid.query.cache.total.size_bytes"], "Found a duplicate in the metrics slice: apachedruid.query.cache.total.size_bytes") - validatedMetrics["apachedruid.query.cache.total.size_bytes"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Size in bytes of cache entries.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.query.cache.total.timeouts": - assert.False(t, validatedMetrics["apachedruid.query.cache.total.timeouts"], "Found a duplicate in the metrics slice: apachedruid.query.cache.total.timeouts") - validatedMetrics["apachedruid.query.cache.total.timeouts"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of cache timeouts.", ms.At(i).Description()) - assert.Equal(t, "{timeouts}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.query.count": - assert.False(t, validatedMetrics["apachedruid.query.count"], "Found a duplicate in the metrics slice: apachedruid.query.count") - validatedMetrics["apachedruid.query.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of total queries.", ms.At(i).Description()) - assert.Equal(t, "{queries}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := 
ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.query.cpu.time": - assert.False(t, validatedMetrics["apachedruid.query.cpu.time"], "Found a duplicate in the metrics slice: apachedruid.query.cpu.time") - validatedMetrics["apachedruid.query.cpu.time"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Microseconds of CPU time taken to complete a query.", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "query_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("num_metrics") - assert.True(t, ok) - assert.EqualValues(t, "query_num_metrics-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("dimension") - assert.True(t, ok) - assert.EqualValues(t, "query_dimension-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("has_filters") - assert.True(t, ok) - assert.EqualValues(t, "query_has_filters-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("threshold") - assert.True(t, ok) - assert.EqualValues(t, 15, attrVal.Int()) - attrVal, ok = dp.Attributes().Get("num_complex_metrics") - assert.True(t, ok) - assert.EqualValues(t, 25, attrVal.Int()) - attrVal, ok = dp.Attributes().Get("type") - assert.True(t, ok) - assert.EqualValues(t, "query_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("remote_address") - assert.True(t, ok) - assert.EqualValues(t, "query_remote_address-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("id") - assert.True(t, ok) - assert.EqualValues(t, "query_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("context") - assert.True(t, ok) - assert.EqualValues(t, "query_context-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("num_dimensions") - assert.True(t, ok) - assert.EqualValues(t, "query_num_dimensions-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("interval") - assert.True(t, ok) - assert.EqualValues(t, "query_interval-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("duration") - assert.True(t, ok) - assert.EqualValues(t, "query_duration-val", attrVal.Str()) - case "apachedruid.query.failed.count": - assert.False(t, validatedMetrics["apachedruid.query.failed.count"], "Found a duplicate in the metrics slice: apachedruid.query.failed.count") - validatedMetrics["apachedruid.query.failed.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of failed queries.", ms.At(i).Description()) - assert.Equal(t, "{queries}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case 
"apachedruid.query.interrupted.count": - assert.False(t, validatedMetrics["apachedruid.query.interrupted.count"], "Found a duplicate in the metrics slice: apachedruid.query.interrupted.count") - validatedMetrics["apachedruid.query.interrupted.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of queries interrupted due to cancellation.", ms.At(i).Description()) - assert.Equal(t, "{queries}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.query.node.backpressure": - assert.False(t, validatedMetrics["apachedruid.query.node.backpressure"], "Found a duplicate in the metrics slice: apachedruid.query.node.backpressure") - validatedMetrics["apachedruid.query.node.backpressure"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Milliseconds that the channel to this process has spent suspended due to backpressure.", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("status") - assert.True(t, ok) - assert.EqualValues(t, "query_status-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("server") - assert.True(t, ok) - assert.EqualValues(t, "query_server-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("id") - assert.True(t, ok) - assert.EqualValues(t, "query_id-val", attrVal.Str()) - case "apachedruid.query.node.bytes": - assert.False(t, validatedMetrics["apachedruid.query.node.bytes"], "Found a duplicate in the metrics slice: apachedruid.query.node.bytes") - validatedMetrics["apachedruid.query.node.bytes"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of bytes returned from querying individual historical/realtime processes.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("status") - assert.True(t, ok) - assert.EqualValues(t, "query_status-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("server") - assert.True(t, ok) - assert.EqualValues(t, "query_server-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("id") - assert.True(t, ok) - assert.EqualValues(t, "query_id-val", attrVal.Str()) - case "apachedruid.query.node.time": - assert.False(t, validatedMetrics["apachedruid.query.node.time"], "Found a duplicate in the metrics slice: apachedruid.query.node.time") - validatedMetrics["apachedruid.query.node.time"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, 
ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Milliseconds taken to query individual historical/realtime processes.", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("status") - assert.True(t, ok) - assert.EqualValues(t, "query_status-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("server") - assert.True(t, ok) - assert.EqualValues(t, "query_server-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("id") - assert.True(t, ok) - assert.EqualValues(t, "query_id-val", attrVal.Str()) - case "apachedruid.query.node.ttfb": - assert.False(t, validatedMetrics["apachedruid.query.node.ttfb"], "Found a duplicate in the metrics slice: apachedruid.query.node.ttfb") - validatedMetrics["apachedruid.query.node.ttfb"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Time to first byte. Milliseconds elapsed until Broker starts receiving the response from individual historical/realtime processes.", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("status") - assert.True(t, ok) - assert.EqualValues(t, "query_status-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("server") - assert.True(t, ok) - assert.EqualValues(t, "query_server-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("id") - assert.True(t, ok) - assert.EqualValues(t, "query_id-val", attrVal.Str()) - case "apachedruid.query.priority": - assert.False(t, validatedMetrics["apachedruid.query.priority"], "Found a duplicate in the metrics slice: apachedruid.query.priority") - validatedMetrics["apachedruid.query.priority"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Assigned lane and priority, only if Laning strategy is enabled. 
Refer to [Laning strategies](https,//druid.apache.org/docs/latest/configuration#laning-strategies).", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("type") - assert.True(t, ok) - assert.EqualValues(t, "query_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "query_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("lane") - assert.True(t, ok) - assert.EqualValues(t, "query_lane-val", attrVal.Str()) - case "apachedruid.query.row_limit.exceeded.count": - assert.False(t, validatedMetrics["apachedruid.query.row_limit.exceeded.count"], "Found a duplicate in the metrics slice: apachedruid.query.row_limit.exceeded.count") - validatedMetrics["apachedruid.query.row_limit.exceeded.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of queries whose inlined subquery results exceeded the given row limit.", ms.At(i).Description()) - assert.Equal(t, "{queries}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.query.segment.time": - assert.False(t, validatedMetrics["apachedruid.query.segment.time"], "Found a duplicate in the metrics slice: apachedruid.query.segment.time") - validatedMetrics["apachedruid.query.segment.time"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Milliseconds taken to query individual segment. 
Includes time to page in the segment from disk.", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("status") - assert.True(t, ok) - assert.EqualValues(t, "query_status-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("segment") - assert.True(t, ok) - assert.EqualValues(t, "query_segment-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("id") - assert.True(t, ok) - assert.EqualValues(t, "query_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("vectorized") - assert.True(t, ok) - assert.EqualValues(t, "query_vectorized-val", attrVal.Str()) - case "apachedruid.query.segment_and_cache.time": - assert.False(t, validatedMetrics["apachedruid.query.segment_and_cache.time"], "Found a duplicate in the metrics slice: apachedruid.query.segment_and_cache.time") - validatedMetrics["apachedruid.query.segment_and_cache.time"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Milliseconds taken to query individual segment or hit the cache (if it is enabled on the Historical process).", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("segment") - assert.True(t, ok) - assert.EqualValues(t, "query_segment-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("id") - assert.True(t, ok) - assert.EqualValues(t, "query_id-val", attrVal.Str()) - case "apachedruid.query.segments.count": - assert.False(t, validatedMetrics["apachedruid.query.segments.count"], "Found a duplicate in the metrics slice: apachedruid.query.segments.count") - validatedMetrics["apachedruid.query.segments.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "This metric is not enabled by default. See the `QueryMetrics` Interface for reference regarding enabling this metric. Number of segments that will be touched by the query. In the broker, it makes a plan to distribute the query to realtime tasks and historicals based on a snapshot of segment distribution state. If there are some segments moved after this snapshot is created, certain historicals and realtime tasks can report those segments as missing to the broker. The broker will resend the query to the new servers that serve those segments after move. 
In this case, those segments can be counted more than once in this metric.", ms.At(i).Description()) - assert.Equal(t, "{segments}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.query.success.count": - assert.False(t, validatedMetrics["apachedruid.query.success.count"], "Found a duplicate in the metrics slice: apachedruid.query.success.count") - validatedMetrics["apachedruid.query.success.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of queries successfully processed.", ms.At(i).Description()) - assert.Equal(t, "{queries}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.query.time": - assert.False(t, validatedMetrics["apachedruid.query.time"], "Found a duplicate in the metrics slice: apachedruid.query.time") - validatedMetrics["apachedruid.query.time"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Milliseconds taken to complete a query.", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "query_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("num_metrics") - assert.True(t, ok) - assert.EqualValues(t, "query_num_metrics-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("dimension") - assert.True(t, ok) - assert.EqualValues(t, "query_dimension-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("has_filters") - assert.True(t, ok) - assert.EqualValues(t, "query_has_filters-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("threshold") - assert.True(t, ok) - assert.EqualValues(t, 15, attrVal.Int()) - attrVal, ok = dp.Attributes().Get("num_complex_metrics") - assert.True(t, ok) - assert.EqualValues(t, 25, attrVal.Int()) - attrVal, ok = dp.Attributes().Get("type") - assert.True(t, ok) - assert.EqualValues(t, "query_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("remote_address") - assert.True(t, ok) - assert.EqualValues(t, "query_remote_address-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("id") - assert.True(t, ok) - assert.EqualValues(t, "query_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("context") - assert.True(t, ok) - assert.EqualValues(t, "query_context-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("num_dimensions") - assert.True(t, ok) - assert.EqualValues(t, "query_num_dimensions-val", 
attrVal.Str()) - attrVal, ok = dp.Attributes().Get("interval") - assert.True(t, ok) - assert.EqualValues(t, "query_interval-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("duration") - assert.True(t, ok) - assert.EqualValues(t, "query_duration-val", attrVal.Str()) - case "apachedruid.query.timeout.count": - assert.False(t, validatedMetrics["apachedruid.query.timeout.count"], "Found a duplicate in the metrics slice: apachedruid.query.timeout.count") - validatedMetrics["apachedruid.query.timeout.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of timed out queries.", ms.At(i).Description()) - assert.Equal(t, "{queries}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.query.wait.time": - assert.False(t, validatedMetrics["apachedruid.query.wait.time"], "Found a duplicate in the metrics slice: apachedruid.query.wait.time") - validatedMetrics["apachedruid.query.wait.time"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Milliseconds spent waiting for a segment to be scanned.", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("segment") - assert.True(t, ok) - assert.EqualValues(t, "query_segment-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("id") - assert.True(t, ok) - assert.EqualValues(t, "query_id-val", attrVal.Str()) - case "apachedruid.segment.added.bytes": - assert.False(t, validatedMetrics["apachedruid.segment.added.bytes"], "Found a duplicate in the metrics slice: apachedruid.segment.added.bytes") - validatedMetrics["apachedruid.segment.added.bytes"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Size in bytes of new segments created.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "segment_task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("group_id") - assert.True(t, ok) - assert.EqualValues(t, "segment_group_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "segment_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "segment_task_id-val", attrVal.Str()) - attrVal, ok = 
dp.Attributes().Get("interval") - assert.True(t, ok) - assert.EqualValues(t, "segment_interval-val", attrVal.Str()) - case "apachedruid.segment.assign_skipped.count": - assert.False(t, validatedMetrics["apachedruid.segment.assign_skipped.count"], "Found a duplicate in the metrics slice: apachedruid.segment.assign_skipped.count") - validatedMetrics["apachedruid.segment.assign_skipped.count"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of segments that could not be assigned to any server for loading. This can occur due to replication throttling, no available disk space, or a full load queue.", ms.At(i).Description()) - assert.Equal(t, "{segments}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("description") - assert.True(t, ok) - assert.EqualValues(t, "segment_description-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tier") - assert.True(t, ok) - assert.EqualValues(t, "segment_tier-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) - case "apachedruid.segment.assigned.count": - assert.False(t, validatedMetrics["apachedruid.segment.assigned.count"], "Found a duplicate in the metrics slice: apachedruid.segment.assigned.count") - validatedMetrics["apachedruid.segment.assigned.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of segments assigned to be loaded in the cluster.", ms.At(i).Description()) - assert.Equal(t, "{segments}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("tier") - assert.True(t, ok) - assert.EqualValues(t, "segment_tier-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) - case "apachedruid.segment.compacted.bytes": - assert.False(t, validatedMetrics["apachedruid.segment.compacted.bytes"], "Found a duplicate in the metrics slice: apachedruid.segment.compacted.bytes") - validatedMetrics["apachedruid.segment.compacted.bytes"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Total bytes of this datasource that are already compacted with the spec set in the auto compaction config.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), 
dp.IntValue()) - attrVal, ok := dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) - case "apachedruid.segment.compacted.count": - assert.False(t, validatedMetrics["apachedruid.segment.compacted.count"], "Found a duplicate in the metrics slice: apachedruid.segment.compacted.count") - validatedMetrics["apachedruid.segment.compacted.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Total number of segments of this datasource that are already compacted with the spec set in the auto compaction config.", ms.At(i).Description()) - assert.Equal(t, "{segments}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) - case "apachedruid.segment.count": - assert.False(t, validatedMetrics["apachedruid.segment.count"], "Found a duplicate in the metrics slice: apachedruid.segment.count") - validatedMetrics["apachedruid.segment.count"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of used segments belonging to a data source. Emitted only for data sources to which at least one used segment belongs.", ms.At(i).Description()) - assert.Equal(t, "{segments}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("priority") - assert.True(t, ok) - assert.EqualValues(t, "segment_priority-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tier") - assert.True(t, ok) - assert.EqualValues(t, "segment_tier-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) - case "apachedruid.segment.deleted.count": - assert.False(t, validatedMetrics["apachedruid.segment.deleted.count"], "Found a duplicate in the metrics slice: apachedruid.segment.deleted.count") - validatedMetrics["apachedruid.segment.deleted.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of segments marked as unused due to drop rules.", ms.At(i).Description()) - assert.Equal(t, "{segments}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) - case 
"apachedruid.segment.drop_queue.count": - assert.False(t, validatedMetrics["apachedruid.segment.drop_queue.count"], "Found a duplicate in the metrics slice: apachedruid.segment.drop_queue.count") - validatedMetrics["apachedruid.segment.drop_queue.count"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of segments to drop.", ms.At(i).Description()) - assert.Equal(t, "{segments}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("server") - assert.True(t, ok) - assert.EqualValues(t, "segment_server-val", attrVal.Str()) - case "apachedruid.segment.drop_skipped.count": - assert.False(t, validatedMetrics["apachedruid.segment.drop_skipped.count"], "Found a duplicate in the metrics slice: apachedruid.segment.drop_skipped.count") - validatedMetrics["apachedruid.segment.drop_skipped.count"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of segments that could not be dropped from any server.", ms.At(i).Description()) - assert.Equal(t, "{segments}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("description") - assert.True(t, ok) - assert.EqualValues(t, "segment_description-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tier") - assert.True(t, ok) - assert.EqualValues(t, "segment_tier-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) - case "apachedruid.segment.dropped.count": - assert.False(t, validatedMetrics["apachedruid.segment.dropped.count"], "Found a duplicate in the metrics slice: apachedruid.segment.dropped.count") - validatedMetrics["apachedruid.segment.dropped.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of segments chosen to be dropped from the cluster due to being over-replicated.", ms.At(i).Description()) - assert.Equal(t, "{segments}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("tier") - assert.True(t, ok) - assert.EqualValues(t, "segment_tier-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) - case "apachedruid.segment.load_queue.assigned": - assert.False(t, validatedMetrics["apachedruid.segment.load_queue.assigned"], "Found a duplicate in the metrics slice: apachedruid.segment.load_queue.assigned") - validatedMetrics["apachedruid.segment.load_queue.assigned"] = true - 
assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of segments assigned for load or drop to the load queue of a server.", ms.At(i).Description()) - assert.Equal(t, "{segments}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("server") - assert.True(t, ok) - assert.EqualValues(t, "segment_server-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) - case "apachedruid.segment.load_queue.cancelled": - assert.False(t, validatedMetrics["apachedruid.segment.load_queue.cancelled"], "Found a duplicate in the metrics slice: apachedruid.segment.load_queue.cancelled") - validatedMetrics["apachedruid.segment.load_queue.cancelled"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of segment assignments that were canceled before completion.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("server") - assert.True(t, ok) - assert.EqualValues(t, "segment_server-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) - case "apachedruid.segment.load_queue.count": - assert.False(t, validatedMetrics["apachedruid.segment.load_queue.count"], "Found a duplicate in the metrics slice: apachedruid.segment.load_queue.count") - validatedMetrics["apachedruid.segment.load_queue.count"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of segments to load.", ms.At(i).Description()) - assert.Equal(t, "{segments}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("server") - assert.True(t, ok) - assert.EqualValues(t, "segment_server-val", attrVal.Str()) - case "apachedruid.segment.load_queue.failed": - assert.False(t, validatedMetrics["apachedruid.segment.load_queue.failed"], "Found a duplicate in the metrics slice: apachedruid.segment.load_queue.failed") - validatedMetrics["apachedruid.segment.load_queue.failed"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of segment assignments that failed to complete.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) 
- assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("server") - assert.True(t, ok) - assert.EqualValues(t, "segment_server-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) - case "apachedruid.segment.load_queue.size": - assert.False(t, validatedMetrics["apachedruid.segment.load_queue.size"], "Found a duplicate in the metrics slice: apachedruid.segment.load_queue.size") - validatedMetrics["apachedruid.segment.load_queue.size"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Size in bytes of segments to load.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("server") - assert.True(t, ok) - assert.EqualValues(t, "segment_server-val", attrVal.Str()) - case "apachedruid.segment.load_queue.success": - assert.False(t, validatedMetrics["apachedruid.segment.load_queue.success"], "Found a duplicate in the metrics slice: apachedruid.segment.load_queue.success") - validatedMetrics["apachedruid.segment.load_queue.success"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of segment assignments that completed successfully.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("server") - assert.True(t, ok) - assert.EqualValues(t, "segment_server-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) - case "apachedruid.segment.max": - assert.False(t, validatedMetrics["apachedruid.segment.max"], "Found a duplicate in the metrics slice: apachedruid.segment.max") - validatedMetrics["apachedruid.segment.max"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Maximum byte limit available for segments.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.segment.move_skipped.count": - assert.False(t, 
validatedMetrics["apachedruid.segment.move_skipped.count"], "Found a duplicate in the metrics slice: apachedruid.segment.move_skipped.count") - validatedMetrics["apachedruid.segment.move_skipped.count"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of segments that were chosen for balancing but could not be moved. This can occur when segments are already optimally placed.", ms.At(i).Description()) - assert.Equal(t, "{segments}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("description") - assert.True(t, ok) - assert.EqualValues(t, "segment_description-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tier") - assert.True(t, ok) - assert.EqualValues(t, "segment_tier-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) - case "apachedruid.segment.moved.bytes": - assert.False(t, validatedMetrics["apachedruid.segment.moved.bytes"], "Found a duplicate in the metrics slice: apachedruid.segment.moved.bytes") - validatedMetrics["apachedruid.segment.moved.bytes"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Size in bytes of segments moved/archived via the Move Task.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "segment_task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("group_id") - assert.True(t, ok) - assert.EqualValues(t, "segment_group_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "segment_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "segment_task_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("interval") - assert.True(t, ok) - assert.EqualValues(t, "segment_interval-val", attrVal.Str()) - case "apachedruid.segment.moved.count": - assert.False(t, validatedMetrics["apachedruid.segment.moved.count"], "Found a duplicate in the metrics slice: apachedruid.segment.moved.count") - validatedMetrics["apachedruid.segment.moved.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of segments moved in the cluster.", ms.At(i).Description()) - assert.Equal(t, "{segments}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, 
pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("tier") - assert.True(t, ok) - assert.EqualValues(t, "segment_tier-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) - case "apachedruid.segment.nuked.bytes": - assert.False(t, validatedMetrics["apachedruid.segment.nuked.bytes"], "Found a duplicate in the metrics slice: apachedruid.segment.nuked.bytes") - validatedMetrics["apachedruid.segment.nuked.bytes"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Size in bytes of segments deleted via the Kill Task.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "segment_task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("group_id") - assert.True(t, ok) - assert.EqualValues(t, "segment_group_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "segment_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "segment_task_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("interval") - assert.True(t, ok) - assert.EqualValues(t, "segment_interval-val", attrVal.Str()) - case "apachedruid.segment.over_shadowed.count": - assert.False(t, validatedMetrics["apachedruid.segment.over_shadowed.count"], "Found a duplicate in the metrics slice: apachedruid.segment.over_shadowed.count") - validatedMetrics["apachedruid.segment.over_shadowed.count"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of segments marked as unused due to being overshadowed.", ms.At(i).Description()) - assert.Equal(t, "{segments}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.segment.pending_delete": - assert.False(t, validatedMetrics["apachedruid.segment.pending_delete"], "Found a duplicate in the metrics slice: apachedruid.segment.pending_delete") - validatedMetrics["apachedruid.segment.pending_delete"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "On-disk size in bytes of segments that are waiting to be cleared out.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.segment.row_count.avg": - assert.False(t, 
validatedMetrics["apachedruid.segment.row_count.avg"], "Found a duplicate in the metrics slice: apachedruid.segment.row_count.avg") - validatedMetrics["apachedruid.segment.row_count.avg"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "The average number of rows per segment on a historical. `SegmentStatsMonitor` must be enabled.", ms.At(i).Description()) - assert.Equal(t, "{rows}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("priority") - assert.True(t, ok) - assert.EqualValues(t, "segment_priority-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tier") - assert.True(t, ok) - assert.EqualValues(t, "segment_tier-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) - case "apachedruid.segment.row_count.range.count": - assert.False(t, validatedMetrics["apachedruid.segment.row_count.range.count"], "Found a duplicate in the metrics slice: apachedruid.segment.row_count.range.count") - validatedMetrics["apachedruid.segment.row_count.range.count"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "The number of segments in a bucket. `SegmentStatsMonitor` must be enabled.", ms.At(i).Description()) - assert.Equal(t, "{segments}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("priority") - assert.True(t, ok) - assert.EqualValues(t, "segment_priority-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tier") - assert.True(t, ok) - assert.EqualValues(t, "segment_tier-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("range") - assert.True(t, ok) - assert.EqualValues(t, "segment_range-val", attrVal.Str()) - case "apachedruid.segment.scan.active": - assert.False(t, validatedMetrics["apachedruid.segment.scan.active"], "Found a duplicate in the metrics slice: apachedruid.segment.scan.active") - validatedMetrics["apachedruid.segment.scan.active"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of segments currently scanned. 
This metric also indicates how many threads from `druid.processing.numThreads` are currently being used.", ms.At(i).Description()) - assert.Equal(t, "{segments}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.segment.scan.pending": - assert.False(t, validatedMetrics["apachedruid.segment.scan.pending"], "Found a duplicate in the metrics slice: apachedruid.segment.scan.pending") - validatedMetrics["apachedruid.segment.scan.pending"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of segments in queue waiting to be scanned.", ms.At(i).Description()) - assert.Equal(t, "{segments}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.segment.size": - assert.False(t, validatedMetrics["apachedruid.segment.size"], "Found a duplicate in the metrics slice: apachedruid.segment.size") - validatedMetrics["apachedruid.segment.size"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Total size of used segments in a data source. Emitted only for data sources to which at least one used segment belongs.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) - case "apachedruid.segment.skip_compact.bytes": - assert.False(t, validatedMetrics["apachedruid.segment.skip_compact.bytes"], "Found a duplicate in the metrics slice: apachedruid.segment.skip_compact.bytes") - validatedMetrics["apachedruid.segment.skip_compact.bytes"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Total bytes of this datasource that are skipped (not eligible for auto compaction) by the auto compaction.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) - case "apachedruid.segment.skip_compact.count": - assert.False(t, validatedMetrics["apachedruid.segment.skip_compact.count"], "Found a duplicate in the metrics slice: apachedruid.segment.skip_compact.count") - validatedMetrics["apachedruid.segment.skip_compact.count"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Total number of segments of this datasource that are skipped (not 
eligible for auto compaction) by the auto compaction.", ms.At(i).Description()) - assert.Equal(t, "{segments}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) - case "apachedruid.segment.unavailable.count": - assert.False(t, validatedMetrics["apachedruid.segment.unavailable.count"], "Found a duplicate in the metrics slice: apachedruid.segment.unavailable.count") - validatedMetrics["apachedruid.segment.unavailable.count"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of unique segments left to load until all used segments are available for queries.", ms.At(i).Description()) - assert.Equal(t, "{segments}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) - case "apachedruid.segment.under_replicated.count": - assert.False(t, validatedMetrics["apachedruid.segment.under_replicated.count"], "Found a duplicate in the metrics slice: apachedruid.segment.under_replicated.count") - validatedMetrics["apachedruid.segment.under_replicated.count"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of segments, including replicas, left to load until all used segments are available for queries.", ms.At(i).Description()) - assert.Equal(t, "{segments}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("tier") - assert.True(t, ok) - assert.EqualValues(t, "segment_tier-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) - case "apachedruid.segment.unneeded.count": - assert.False(t, validatedMetrics["apachedruid.segment.unneeded.count"], "Found a duplicate in the metrics slice: apachedruid.segment.unneeded.count") - validatedMetrics["apachedruid.segment.unneeded.count"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of segments dropped due to being marked as unused.", ms.At(i).Description()) - assert.Equal(t, "{segments}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("tier") - assert.True(t, ok) - assert.EqualValues(t, "segment_tier-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - 
assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) - case "apachedruid.segment.used": - assert.False(t, validatedMetrics["apachedruid.segment.used"], "Found a duplicate in the metrics slice: apachedruid.segment.used") - validatedMetrics["apachedruid.segment.used"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Bytes used for served segments.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("priority") - assert.True(t, ok) - assert.EqualValues(t, "segment_priority-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tier") - assert.True(t, ok) - assert.EqualValues(t, "segment_tier-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) - case "apachedruid.segment.used_percent": - assert.False(t, validatedMetrics["apachedruid.segment.used_percent"], "Found a duplicate in the metrics slice: apachedruid.segment.used_percent") - validatedMetrics["apachedruid.segment.used_percent"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Percentage of space used by served segments.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.Equal(t, float64(1), dp.DoubleValue()) - attrVal, ok := dp.Attributes().Get("priority") - assert.True(t, ok) - assert.EqualValues(t, "segment_priority-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tier") - assert.True(t, ok) - assert.EqualValues(t, "segment_tier-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) - case "apachedruid.segment.wait_compact.bytes": - assert.False(t, validatedMetrics["apachedruid.segment.wait_compact.bytes"], "Found a duplicate in the metrics slice: apachedruid.segment.wait_compact.bytes") - validatedMetrics["apachedruid.segment.wait_compact.bytes"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Total bytes of this datasource waiting to be compacted by the auto compaction (only consider intervals/segments that are eligible for auto compaction).", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) - case "apachedruid.segment.wait_compact.count": - assert.False(t, validatedMetrics["apachedruid.segment.wait_compact.count"], "Found a duplicate in the metrics slice: apachedruid.segment.wait_compact.count") - 
validatedMetrics["apachedruid.segment.wait_compact.count"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Total number of segments of this datasource waiting to be compacted by the auto compaction (only consider intervals/segments that are eligible for auto compaction).", ms.At(i).Description()) - assert.Equal(t, "{segments}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "segment_data_source-val", attrVal.Str()) - case "apachedruid.serverview.init.time": - assert.False(t, validatedMetrics["apachedruid.serverview.init.time"], "Found a duplicate in the metrics slice: apachedruid.serverview.init.time") - validatedMetrics["apachedruid.serverview.init.time"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Time taken to initialize the broker server view. Useful to detect if brokers are taking too long to start.", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.serverview.sync.healthy": - assert.False(t, validatedMetrics["apachedruid.serverview.sync.healthy"], "Found a duplicate in the metrics slice: apachedruid.serverview.sync.healthy") - validatedMetrics["apachedruid.serverview.sync.healthy"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Sync status of the Broker with a segment-loading server such as a Historical or Peon. Emitted only when [HTTP-based server view](https,//druid.apache.org/docs/latest/configuration#segment-management) is enabled. This metric can be used in conjunction with `serverview/sync/unstableTime` to debug slow startup of Brokers.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("tier") - assert.True(t, ok) - assert.EqualValues(t, "serverview_tier-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("server") - assert.True(t, ok) - assert.EqualValues(t, "serverview_server-val", attrVal.Str()) - case "apachedruid.serverview.sync.unstable_time": - assert.False(t, validatedMetrics["apachedruid.serverview.sync.unstable_time"], "Found a duplicate in the metrics slice: apachedruid.serverview.sync.unstable_time") - validatedMetrics["apachedruid.serverview.sync.unstable_time"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Time in milliseconds for which the Broker has been failing to sync with a segment-loading server. 
Emitted only when [HTTP-based server view](https,//druid.apache.org/docs/latest/configuration#segment-management) is enabled.", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("tier") - assert.True(t, ok) - assert.EqualValues(t, "serverview_tier-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("server") - assert.True(t, ok) - assert.EqualValues(t, "serverview_server-val", attrVal.Str()) - case "apachedruid.sql_query.bytes": - assert.False(t, validatedMetrics["apachedruid.sql_query.bytes"], "Found a duplicate in the metrics slice: apachedruid.sql_query.bytes") - validatedMetrics["apachedruid.sql_query.bytes"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of bytes returned in the SQL query response.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "sqlQuery_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("native_query_ids") - assert.True(t, ok) - assert.EqualValues(t, "sqlQuery_native_query_ids-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("engine") - assert.True(t, ok) - assert.EqualValues(t, "sqlQuery_engine-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("remote_address") - assert.True(t, ok) - assert.EqualValues(t, "sqlQuery_remote_address-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("id") - assert.True(t, ok) - assert.EqualValues(t, "sqlQuery_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("success") - assert.True(t, ok) - assert.EqualValues(t, "sqlQuery_success-val", attrVal.Str()) - case "apachedruid.sql_query.planning_time_ms": - assert.False(t, validatedMetrics["apachedruid.sql_query.planning_time_ms"], "Found a duplicate in the metrics slice: apachedruid.sql_query.planning_time_ms") - validatedMetrics["apachedruid.sql_query.planning_time_ms"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Milliseconds taken to plan a SQL to native query.", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "sqlQuery_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("native_query_ids") - assert.True(t, ok) - assert.EqualValues(t, "sqlQuery_native_query_ids-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("engine") - assert.True(t, ok) - assert.EqualValues(t, "sqlQuery_engine-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("remote_address") - assert.True(t, ok) - assert.EqualValues(t, 
"sqlQuery_remote_address-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("id") - assert.True(t, ok) - assert.EqualValues(t, "sqlQuery_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("success") - assert.True(t, ok) - assert.EqualValues(t, "sqlQuery_success-val", attrVal.Str()) - case "apachedruid.sql_query.time": - assert.False(t, validatedMetrics["apachedruid.sql_query.time"], "Found a duplicate in the metrics slice: apachedruid.sql_query.time") - validatedMetrics["apachedruid.sql_query.time"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Milliseconds taken to complete a SQL query.", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "sqlQuery_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("native_query_ids") - assert.True(t, ok) - assert.EqualValues(t, "sqlQuery_native_query_ids-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("engine") - assert.True(t, ok) - assert.EqualValues(t, "sqlQuery_engine-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("remote_address") - assert.True(t, ok) - assert.EqualValues(t, "sqlQuery_remote_address-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("id") - assert.True(t, ok) - assert.EqualValues(t, "sqlQuery_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("success") - assert.True(t, ok) - assert.EqualValues(t, "sqlQuery_success-val", attrVal.Str()) - case "apachedruid.subquery.byte_limit.count": - assert.False(t, validatedMetrics["apachedruid.subquery.byte_limit.count"], "Found a duplicate in the metrics slice: apachedruid.subquery.byte_limit.count") - validatedMetrics["apachedruid.subquery.byte_limit.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of subqueries whose results are materialized as frames (Druid's internal byte representation of rows).", ms.At(i).Description()) - assert.Equal(t, "{subqueries}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.subquery.fallback.count": - assert.False(t, validatedMetrics["apachedruid.subquery.fallback.count"], "Found a duplicate in the metrics slice: apachedruid.subquery.fallback.count") - validatedMetrics["apachedruid.subquery.fallback.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of subqueries which cannot be materialized as frames.", ms.At(i).Description()) - assert.Equal(t, "{subqueries}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - 
assert.Equal(t, start, dp.StartTimestamp())
-				assert.Equal(t, ts, dp.Timestamp())
-				assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
-				assert.Equal(t, int64(1), dp.IntValue())
-			case "apachedruid.subquery.fallback.insufficient_type.count":
-				assert.False(t, validatedMetrics["apachedruid.subquery.fallback.insufficient_type.count"], "Found a duplicate in the metrics slice: apachedruid.subquery.fallback.insufficient_type.count")
-				validatedMetrics["apachedruid.subquery.fallback.insufficient_type.count"] = true
-				assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type())
-				assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len())
-				assert.Equal(t, "Number of subqueries which cannot be materialized as frames due to insufficient type information in the row signature.", ms.At(i).Description())
-				assert.Equal(t, "{subqueries}", ms.At(i).Unit())
-				assert.Equal(t, true, ms.At(i).Sum().IsMonotonic())
-				assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality())
-				dp := ms.At(i).Sum().DataPoints().At(0)
-				assert.Equal(t, start, dp.StartTimestamp())
-				assert.Equal(t, ts, dp.Timestamp())
-				assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
-				assert.Equal(t, int64(1), dp.IntValue())
-			case "apachedruid.subquery.fallback.unknown_reason.count":
-				assert.False(t, validatedMetrics["apachedruid.subquery.fallback.unknown_reason.count"], "Found a duplicate in the metrics slice: apachedruid.subquery.fallback.unknown_reason.count")
-				validatedMetrics["apachedruid.subquery.fallback.unknown_reason.count"] = true
-				assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type())
-				assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len())
-				assert.Equal(t, "Number of subqueries which cannot be materialized as frames due to other reasons.", ms.At(i).Description())
-				assert.Equal(t, "{subqueries}", ms.At(i).Unit())
-				assert.Equal(t, true, ms.At(i).Sum().IsMonotonic())
-				assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality())
-				dp := ms.At(i).Sum().DataPoints().At(0)
-				assert.Equal(t, start, dp.StartTimestamp())
-				assert.Equal(t, ts, dp.Timestamp())
-				assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
-				assert.Equal(t, int64(1), dp.IntValue())
-			case "apachedruid.subquery.row_limit.count":
-				assert.False(t, validatedMetrics["apachedruid.subquery.row_limit.count"], "Found a duplicate in the metrics slice: apachedruid.subquery.row_limit.count")
-				validatedMetrics["apachedruid.subquery.row_limit.count"] = true
-				assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type())
-				assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len())
-				assert.Equal(t, "Number of subqueries whose results are materialized as rows (Java objects on heap).", ms.At(i).Description())
-				assert.Equal(t, "{subqueries}", ms.At(i).Unit())
-				assert.Equal(t, true, ms.At(i).Sum().IsMonotonic())
-				assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality())
-				dp := ms.At(i).Sum().DataPoints().At(0)
-				assert.Equal(t, start, dp.StartTimestamp())
-				assert.Equal(t, ts, dp.Timestamp())
-				assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
-				assert.Equal(t, int64(1), dp.IntValue())
-			case "apachedruid.sys.cpu":
-				assert.False(t, validatedMetrics["apachedruid.sys.cpu"], "Found a duplicate in the metrics slice: apachedruid.sys.cpu")
-				validatedMetrics["apachedruid.sys.cpu"] = true
-				assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
-				assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
-				assert.Equal(t, "CPU 
used.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("cpu_time") - assert.True(t, ok) - assert.EqualValues(t, "sys_cpu_time-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("cpu_name") - assert.True(t, ok) - assert.EqualValues(t, "sys_cpu_name-val", attrVal.Str()) - case "apachedruid.sys.disk.queue": - assert.False(t, validatedMetrics["apachedruid.sys.disk.queue"], "Found a duplicate in the metrics slice: apachedruid.sys.disk.queue") - validatedMetrics["apachedruid.sys.disk.queue"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Disk queue length. Measures number of requests waiting to be processed by disk.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("disk_name") - assert.True(t, ok) - assert.EqualValues(t, "sys_disk_name-val", attrVal.Str()) - case "apachedruid.sys.disk.read.count": - assert.False(t, validatedMetrics["apachedruid.sys.disk.read.count"], "Found a duplicate in the metrics slice: apachedruid.sys.disk.read.count") - validatedMetrics["apachedruid.sys.disk.read.count"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Reads from disk.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("disk_name") - assert.True(t, ok) - assert.EqualValues(t, "sys_disk_name-val", attrVal.Str()) - case "apachedruid.sys.disk.read.size": - assert.False(t, validatedMetrics["apachedruid.sys.disk.read.size"], "Found a duplicate in the metrics slice: apachedruid.sys.disk.read.size") - validatedMetrics["apachedruid.sys.disk.read.size"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Bytes read from disk. 
One indicator of the amount of paging occurring for segments.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("disk_name") - assert.True(t, ok) - assert.EqualValues(t, "sys_disk_name-val", attrVal.Str()) - case "apachedruid.sys.disk.transfer_time": - assert.False(t, validatedMetrics["apachedruid.sys.disk.transfer_time"], "Found a duplicate in the metrics slice: apachedruid.sys.disk.transfer_time") - validatedMetrics["apachedruid.sys.disk.transfer_time"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Transfer time to read from or write to disk.", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("disk_name") - assert.True(t, ok) - assert.EqualValues(t, "sys_disk_name-val", attrVal.Str()) - case "apachedruid.sys.disk.write.count": - assert.False(t, validatedMetrics["apachedruid.sys.disk.write.count"], "Found a duplicate in the metrics slice: apachedruid.sys.disk.write.count") - validatedMetrics["apachedruid.sys.disk.write.count"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Writes to disk.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("disk_name") - assert.True(t, ok) - assert.EqualValues(t, "sys_disk_name-val", attrVal.Str()) - case "apachedruid.sys.disk.write.size": - assert.False(t, validatedMetrics["apachedruid.sys.disk.write.size"], "Found a duplicate in the metrics slice: apachedruid.sys.disk.write.size") - validatedMetrics["apachedruid.sys.disk.write.size"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Bytes written to disk. 
One indicator of the amount of paging occurring for segments.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("disk_name") - assert.True(t, ok) - assert.EqualValues(t, "sys_disk_name-val", attrVal.Str()) - case "apachedruid.sys.fs.files.count": - assert.False(t, validatedMetrics["apachedruid.sys.fs.files.count"], "Found a duplicate in the metrics slice: apachedruid.sys.fs.files.count") - validatedMetrics["apachedruid.sys.fs.files.count"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Filesystem total IO nodes.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("fs_dir_name") - assert.True(t, ok) - assert.EqualValues(t, "sys_fs_dir_name-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("fs_dev_name") - assert.True(t, ok) - assert.EqualValues(t, "sys_fs_dev_name-val", attrVal.Str()) - case "apachedruid.sys.fs.files.free": - assert.False(t, validatedMetrics["apachedruid.sys.fs.files.free"], "Found a duplicate in the metrics slice: apachedruid.sys.fs.files.free") - validatedMetrics["apachedruid.sys.fs.files.free"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Filesystem free IO nodes.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("fs_dir_name") - assert.True(t, ok) - assert.EqualValues(t, "sys_fs_dir_name-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("fs_dev_name") - assert.True(t, ok) - assert.EqualValues(t, "sys_fs_dev_name-val", attrVal.Str()) - case "apachedruid.sys.fs.max": - assert.False(t, validatedMetrics["apachedruid.sys.fs.max"], "Found a duplicate in the metrics slice: apachedruid.sys.fs.max") - validatedMetrics["apachedruid.sys.fs.max"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Filesystem bytes max.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("fs_dir_name") - assert.True(t, ok) - assert.EqualValues(t, "sys_fs_dir_name-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("fs_dev_name") - assert.True(t, ok) - assert.EqualValues(t, "sys_fs_dev_name-val", attrVal.Str()) - case "apachedruid.sys.fs.used": - assert.False(t, validatedMetrics["apachedruid.sys.fs.used"], "Found a duplicate in the metrics 
slice: apachedruid.sys.fs.used") - validatedMetrics["apachedruid.sys.fs.used"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Filesystem bytes used.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("fs_dir_name") - assert.True(t, ok) - assert.EqualValues(t, "sys_fs_dir_name-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("fs_dev_name") - assert.True(t, ok) - assert.EqualValues(t, "sys_fs_dev_name-val", attrVal.Str()) - case "apachedruid.sys.la.1": - assert.False(t, validatedMetrics["apachedruid.sys.la.1"], "Found a duplicate in the metrics slice: apachedruid.sys.la.1") - validatedMetrics["apachedruid.sys.la.1"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "System CPU load averages over past `i` minutes, where `i={1,5,15}`.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.sys.la.15": - assert.False(t, validatedMetrics["apachedruid.sys.la.15"], "Found a duplicate in the metrics slice: apachedruid.sys.la.15") - validatedMetrics["apachedruid.sys.la.15"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "System CPU load averages over past `i` minutes, where `i={1,5,15}`.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.sys.la.5": - assert.False(t, validatedMetrics["apachedruid.sys.la.5"], "Found a duplicate in the metrics slice: apachedruid.sys.la.5") - validatedMetrics["apachedruid.sys.la.5"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "System CPU load averages over past `i` minutes, where `i={1,5,15}`.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.sys.mem.free": - assert.False(t, validatedMetrics["apachedruid.sys.mem.free"], "Found a duplicate in the metrics slice: apachedruid.sys.mem.free") - validatedMetrics["apachedruid.sys.mem.free"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Memory free.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - 
assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.sys.mem.max": - assert.False(t, validatedMetrics["apachedruid.sys.mem.max"], "Found a duplicate in the metrics slice: apachedruid.sys.mem.max") - validatedMetrics["apachedruid.sys.mem.max"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Memory max.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.sys.mem.used": - assert.False(t, validatedMetrics["apachedruid.sys.mem.used"], "Found a duplicate in the metrics slice: apachedruid.sys.mem.used") - validatedMetrics["apachedruid.sys.mem.used"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Memory used.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.sys.net.read.dropped": - assert.False(t, validatedMetrics["apachedruid.sys.net.read.dropped"], "Found a duplicate in the metrics slice: apachedruid.sys.net.read.dropped") - validatedMetrics["apachedruid.sys.net.read.dropped"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Total packets dropped coming from network.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("net_hwaddr") - assert.True(t, ok) - assert.EqualValues(t, "sys_net_hwaddr-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("net_name") - assert.True(t, ok) - assert.EqualValues(t, "sys_net_name-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("net_address") - assert.True(t, ok) - assert.EqualValues(t, "sys_net_address-val", attrVal.Str()) - case "apachedruid.sys.net.read.errors": - assert.False(t, validatedMetrics["apachedruid.sys.net.read.errors"], "Found a duplicate in the metrics slice: apachedruid.sys.net.read.errors") - validatedMetrics["apachedruid.sys.net.read.errors"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Total network read errors.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("net_hwaddr") - assert.True(t, ok) - assert.EqualValues(t, "sys_net_hwaddr-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("net_name") - assert.True(t, ok) - assert.EqualValues(t, 
"sys_net_name-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("net_address") - assert.True(t, ok) - assert.EqualValues(t, "sys_net_address-val", attrVal.Str()) - case "apachedruid.sys.net.read.packets": - assert.False(t, validatedMetrics["apachedruid.sys.net.read.packets"], "Found a duplicate in the metrics slice: apachedruid.sys.net.read.packets") - validatedMetrics["apachedruid.sys.net.read.packets"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Total packets read from the network.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("net_hwaddr") - assert.True(t, ok) - assert.EqualValues(t, "sys_net_hwaddr-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("net_name") - assert.True(t, ok) - assert.EqualValues(t, "sys_net_name-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("net_address") - assert.True(t, ok) - assert.EqualValues(t, "sys_net_address-val", attrVal.Str()) - case "apachedruid.sys.net.read.size": - assert.False(t, validatedMetrics["apachedruid.sys.net.read.size"], "Found a duplicate in the metrics slice: apachedruid.sys.net.read.size") - validatedMetrics["apachedruid.sys.net.read.size"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Bytes read from the network.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("net_hwaddr") - assert.True(t, ok) - assert.EqualValues(t, "sys_net_hwaddr-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("net_name") - assert.True(t, ok) - assert.EqualValues(t, "sys_net_name-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("net_address") - assert.True(t, ok) - assert.EqualValues(t, "sys_net_address-val", attrVal.Str()) - case "apachedruid.sys.net.write.collisions": - assert.False(t, validatedMetrics["apachedruid.sys.net.write.collisions"], "Found a duplicate in the metrics slice: apachedruid.sys.net.write.collisions") - validatedMetrics["apachedruid.sys.net.write.collisions"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Total network write collisions.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("net_hwaddr") - assert.True(t, ok) - assert.EqualValues(t, "sys_net_hwaddr-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("net_name") - assert.True(t, ok) - assert.EqualValues(t, "sys_net_name-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("net_address") - assert.True(t, ok) - assert.EqualValues(t, "sys_net_address-val", attrVal.Str()) - 
case "apachedruid.sys.net.write.errors": - assert.False(t, validatedMetrics["apachedruid.sys.net.write.errors"], "Found a duplicate in the metrics slice: apachedruid.sys.net.write.errors") - validatedMetrics["apachedruid.sys.net.write.errors"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Total network write errors.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("net_hwaddr") - assert.True(t, ok) - assert.EqualValues(t, "sys_net_hwaddr-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("net_name") - assert.True(t, ok) - assert.EqualValues(t, "sys_net_name-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("net_address") - assert.True(t, ok) - assert.EqualValues(t, "sys_net_address-val", attrVal.Str()) - case "apachedruid.sys.net.write.packets": - assert.False(t, validatedMetrics["apachedruid.sys.net.write.packets"], "Found a duplicate in the metrics slice: apachedruid.sys.net.write.packets") - validatedMetrics["apachedruid.sys.net.write.packets"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Total packets written to the network.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("net_hwaddr") - assert.True(t, ok) - assert.EqualValues(t, "sys_net_hwaddr-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("net_name") - assert.True(t, ok) - assert.EqualValues(t, "sys_net_name-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("net_address") - assert.True(t, ok) - assert.EqualValues(t, "sys_net_address-val", attrVal.Str()) - case "apachedruid.sys.net.write.size": - assert.False(t, validatedMetrics["apachedruid.sys.net.write.size"], "Found a duplicate in the metrics slice: apachedruid.sys.net.write.size") - validatedMetrics["apachedruid.sys.net.write.size"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Bytes written to the network.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("net_hwaddr") - assert.True(t, ok) - assert.EqualValues(t, "sys_net_hwaddr-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("net_name") - assert.True(t, ok) - assert.EqualValues(t, "sys_net_name-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("net_address") - assert.True(t, ok) - assert.EqualValues(t, "sys_net_address-val", attrVal.Str()) - case "apachedruid.sys.storage.used": - assert.False(t, validatedMetrics["apachedruid.sys.storage.used"], "Found a duplicate in the metrics slice: apachedruid.sys.storage.used") - 
validatedMetrics["apachedruid.sys.storage.used"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Disk space used.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("fs_dir_name") - assert.True(t, ok) - assert.EqualValues(t, "sys_fs_dir_name-val", attrVal.Str()) - case "apachedruid.sys.swap.free": - assert.False(t, validatedMetrics["apachedruid.sys.swap.free"], "Found a duplicate in the metrics slice: apachedruid.sys.swap.free") - validatedMetrics["apachedruid.sys.swap.free"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Free swap.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.sys.swap.max": - assert.False(t, validatedMetrics["apachedruid.sys.swap.max"], "Found a duplicate in the metrics slice: apachedruid.sys.swap.max") - validatedMetrics["apachedruid.sys.swap.max"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Max swap.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.sys.swap.page_in": - assert.False(t, validatedMetrics["apachedruid.sys.swap.page_in"], "Found a duplicate in the metrics slice: apachedruid.sys.swap.page_in") - validatedMetrics["apachedruid.sys.swap.page_in"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Paged in swap.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.sys.swap.page_out": - assert.False(t, validatedMetrics["apachedruid.sys.swap.page_out"], "Found a duplicate in the metrics slice: apachedruid.sys.swap.page_out") - validatedMetrics["apachedruid.sys.swap.page_out"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Paged out swap.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.sys.tcpv4.active_opens": - assert.False(t, validatedMetrics["apachedruid.sys.tcpv4.active_opens"], "Found a duplicate in 
the metrics slice: apachedruid.sys.tcpv4.active_opens") - validatedMetrics["apachedruid.sys.tcpv4.active_opens"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Total TCP active open connections.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.sys.tcpv4.attempt_fails": - assert.False(t, validatedMetrics["apachedruid.sys.tcpv4.attempt_fails"], "Found a duplicate in the metrics slice: apachedruid.sys.tcpv4.attempt_fails") - validatedMetrics["apachedruid.sys.tcpv4.attempt_fails"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Total TCP active connection failures.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.sys.tcpv4.estab_resets": - assert.False(t, validatedMetrics["apachedruid.sys.tcpv4.estab_resets"], "Found a duplicate in the metrics slice: apachedruid.sys.tcpv4.estab_resets") - validatedMetrics["apachedruid.sys.tcpv4.estab_resets"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Total TCP connection resets.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.sys.tcpv4.in.errs": - assert.False(t, validatedMetrics["apachedruid.sys.tcpv4.in.errs"], "Found a duplicate in the metrics slice: apachedruid.sys.tcpv4.in.errs") - validatedMetrics["apachedruid.sys.tcpv4.in.errs"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Errors while reading segments.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.sys.tcpv4.in.segs": - assert.False(t, validatedMetrics["apachedruid.sys.tcpv4.in.segs"], "Found a duplicate in the metrics slice: apachedruid.sys.tcpv4.in.segs") - validatedMetrics["apachedruid.sys.tcpv4.in.segs"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Total segments received in connection.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case 
"apachedruid.sys.tcpv4.out.rsts": - assert.False(t, validatedMetrics["apachedruid.sys.tcpv4.out.rsts"], "Found a duplicate in the metrics slice: apachedruid.sys.tcpv4.out.rsts") - validatedMetrics["apachedruid.sys.tcpv4.out.rsts"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Total `out reset` packets sent to reset the connection.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.sys.tcpv4.out.segs": - assert.False(t, validatedMetrics["apachedruid.sys.tcpv4.out.segs"], "Found a duplicate in the metrics slice: apachedruid.sys.tcpv4.out.segs") - validatedMetrics["apachedruid.sys.tcpv4.out.segs"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Total segments sent.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.sys.tcpv4.passive_opens": - assert.False(t, validatedMetrics["apachedruid.sys.tcpv4.passive_opens"], "Found a duplicate in the metrics slice: apachedruid.sys.tcpv4.passive_opens") - validatedMetrics["apachedruid.sys.tcpv4.passive_opens"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Total TCP passive open connections.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.sys.tcpv4.retrans.segs": - assert.False(t, validatedMetrics["apachedruid.sys.tcpv4.retrans.segs"], "Found a duplicate in the metrics slice: apachedruid.sys.tcpv4.retrans.segs") - validatedMetrics["apachedruid.sys.tcpv4.retrans.segs"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Total segments re-transmitted.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.sys.uptime": - assert.False(t, validatedMetrics["apachedruid.sys.uptime"], "Found a duplicate in the metrics slice: apachedruid.sys.uptime") - validatedMetrics["apachedruid.sys.uptime"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Total system uptime.", ms.At(i).Description()) - assert.Equal(t, "s", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, 
dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.task.action.batch.attempts": - assert.False(t, validatedMetrics["apachedruid.task.action.batch.attempts"], "Found a duplicate in the metrics slice: apachedruid.task.action.batch.attempts") - validatedMetrics["apachedruid.task.action.batch.attempts"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of execution attempts for a single batch of task actions. Currently only being emitted for [batched `segmentAllocate` actions](https,//druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions).", ms.At(i).Description()) - assert.Equal(t, "{attempts}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("interval") - assert.True(t, ok) - assert.EqualValues(t, "task_interval-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "task_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_action_type") - assert.True(t, ok) - assert.EqualValues(t, "task_action_type-val", attrVal.Str()) - case "apachedruid.task.action.batch.queue_time": - assert.False(t, validatedMetrics["apachedruid.task.action.batch.queue_time"], "Found a duplicate in the metrics slice: apachedruid.task.action.batch.queue_time") - validatedMetrics["apachedruid.task.action.batch.queue_time"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Milliseconds spent by a batch of task actions in queue. Currently only being emitted for [batched `segmentAllocate` actions](https,//druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions).", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("interval") - assert.True(t, ok) - assert.EqualValues(t, "task_interval-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "task_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_action_type") - assert.True(t, ok) - assert.EqualValues(t, "task_action_type-val", attrVal.Str()) - case "apachedruid.task.action.batch.run_time": - assert.False(t, validatedMetrics["apachedruid.task.action.batch.run_time"], "Found a duplicate in the metrics slice: apachedruid.task.action.batch.run_time") - validatedMetrics["apachedruid.task.action.batch.run_time"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Milliseconds taken to execute a batch of task actions. 
Currently only being emitted for [batched `segmentAllocate` actions](https,//druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions).", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("interval") - assert.True(t, ok) - assert.EqualValues(t, "task_interval-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "task_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_action_type") - assert.True(t, ok) - assert.EqualValues(t, "task_action_type-val", attrVal.Str()) - case "apachedruid.task.action.batch.size": - assert.False(t, validatedMetrics["apachedruid.task.action.batch.size"], "Found a duplicate in the metrics slice: apachedruid.task.action.batch.size") - validatedMetrics["apachedruid.task.action.batch.size"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of task actions in a batch that was executed during the emission period. Currently only being emitted for [batched `segmentAllocate` actions](https,//druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions).", ms.At(i).Description()) - assert.Equal(t, "{actions}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("interval") - assert.True(t, ok) - assert.EqualValues(t, "task_interval-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "task_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_action_type") - assert.True(t, ok) - assert.EqualValues(t, "task_action_type-val", attrVal.Str()) - case "apachedruid.task.action.failed.count": - assert.False(t, validatedMetrics["apachedruid.task.action.failed.count"], "Found a duplicate in the metrics slice: apachedruid.task.action.failed.count") - validatedMetrics["apachedruid.task.action.failed.count"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of task actions that failed during the emission period. 
Currently only being emitted for [batched `segmentAllocate` actions](https,//druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions).", ms.At(i).Description()) - assert.Equal(t, "{actions}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "task_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_action_type") - assert.True(t, ok) - assert.EqualValues(t, "task_action_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("group_id") - assert.True(t, ok) - assert.EqualValues(t, "task_group_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "task_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "task_id-val", attrVal.Str()) - case "apachedruid.task.action.log.time": - assert.False(t, validatedMetrics["apachedruid.task.action.log.time"], "Found a duplicate in the metrics slice: apachedruid.task.action.log.time") - validatedMetrics["apachedruid.task.action.log.time"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Milliseconds taken to log a task action to the audit log.", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "task_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_action_type") - assert.True(t, ok) - assert.EqualValues(t, "task_action_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("group_id") - assert.True(t, ok) - assert.EqualValues(t, "task_group_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "task_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "task_id-val", attrVal.Str()) - case "apachedruid.task.action.run.time": - assert.False(t, validatedMetrics["apachedruid.task.action.run.time"], "Found a duplicate in the metrics slice: apachedruid.task.action.run.time") - validatedMetrics["apachedruid.task.action.run.time"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Milliseconds taken to execute a task action.", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := 
dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "task_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_action_type") - assert.True(t, ok) - assert.EqualValues(t, "task_action_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("group_id") - assert.True(t, ok) - assert.EqualValues(t, "task_group_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "task_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "task_id-val", attrVal.Str()) - case "apachedruid.task.action.success.count": - assert.False(t, validatedMetrics["apachedruid.task.action.success.count"], "Found a duplicate in the metrics slice: apachedruid.task.action.success.count") - validatedMetrics["apachedruid.task.action.success.count"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of task actions that were executed successfully during the emission period. Currently only being emitted for [batched `segmentAllocate` actions](https,//druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions).", ms.At(i).Description()) - assert.Equal(t, "{actions}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "task_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_action_type") - assert.True(t, ok) - assert.EqualValues(t, "task_action_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("group_id") - assert.True(t, ok) - assert.EqualValues(t, "task_group_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "task_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "task_id-val", attrVal.Str()) - case "apachedruid.task.failed.count": - assert.False(t, validatedMetrics["apachedruid.task.failed.count"], "Found a duplicate in the metrics slice: apachedruid.task.failed.count") - validatedMetrics["apachedruid.task.failed.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of failed tasks per emission period. 
This metric is only available if the `TaskCountStatsMonitor` module is included.", ms.At(i).Description()) - assert.Equal(t, "{tasks}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "task_data_source-val", attrVal.Str()) - case "apachedruid.task.pending.count": - assert.False(t, validatedMetrics["apachedruid.task.pending.count"], "Found a duplicate in the metrics slice: apachedruid.task.pending.count") - validatedMetrics["apachedruid.task.pending.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of current pending tasks. This metric is only available if the `TaskCountStatsMonitor` module is included.", ms.At(i).Description()) - assert.Equal(t, "{tasks}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "task_data_source-val", attrVal.Str()) - case "apachedruid.task.pending.time": - assert.False(t, validatedMetrics["apachedruid.task.pending.time"], "Found a duplicate in the metrics slice: apachedruid.task.pending.time") - validatedMetrics["apachedruid.task.pending.time"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Milliseconds taken for a task to wait for running.", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "task_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("group_id") - assert.True(t, ok) - assert.EqualValues(t, "task_group_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "task_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "task_id-val", attrVal.Str()) - case "apachedruid.task.run.time": - assert.False(t, validatedMetrics["apachedruid.task.run.time"], "Found a duplicate in the metrics slice: apachedruid.task.run.time") - validatedMetrics["apachedruid.task.run.time"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Milliseconds taken to run a task.", 
ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "task_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("group_id") - assert.True(t, ok) - assert.EqualValues(t, "task_group_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_status") - assert.True(t, ok) - assert.EqualValues(t, "task_status-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "task_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "task_id-val", attrVal.Str()) - case "apachedruid.task.running.count": - assert.False(t, validatedMetrics["apachedruid.task.running.count"], "Found a duplicate in the metrics slice: apachedruid.task.running.count") - validatedMetrics["apachedruid.task.running.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of current running tasks. This metric is only available if the `TaskCountStatsMonitor` module is included.", ms.At(i).Description()) - assert.Equal(t, "{tasks}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "task_data_source-val", attrVal.Str()) - case "apachedruid.task.segment_availability.wait.time": - assert.False(t, validatedMetrics["apachedruid.task.segment_availability.wait.time"], "Found a duplicate in the metrics slice: apachedruid.task.segment_availability.wait.time") - validatedMetrics["apachedruid.task.segment_availability.wait.time"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "The amount of milliseconds a batch indexing task waited for newly created segments to become available for querying.", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("task_type") - assert.True(t, ok) - assert.EqualValues(t, "task_type-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "task_data_source-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("group_id") - assert.True(t, ok) - assert.EqualValues(t, "task_group_id-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("segment_availability_confirmed") - assert.True(t, ok) - assert.EqualValues(t, 
"task_segment_availability_confirmed-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("tags") - assert.True(t, ok) - assert.EqualValues(t, "task_tags-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("task_id") - assert.True(t, ok) - assert.EqualValues(t, "task_id-val", attrVal.Str()) - case "apachedruid.task.success.count": - assert.False(t, validatedMetrics["apachedruid.task.success.count"], "Found a duplicate in the metrics slice: apachedruid.task.success.count") - validatedMetrics["apachedruid.task.success.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of successful tasks per emission period. This metric is only available if the `TaskCountStatsMonitor` module is included.", ms.At(i).Description()) - assert.Equal(t, "{tasks}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "task_data_source-val", attrVal.Str()) - case "apachedruid.task.waiting.count": - assert.False(t, validatedMetrics["apachedruid.task.waiting.count"], "Found a duplicate in the metrics slice: apachedruid.task.waiting.count") - validatedMetrics["apachedruid.task.waiting.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of current waiting tasks. This metric is only available if the `TaskCountStatsMonitor` module is included.", ms.At(i).Description()) - assert.Equal(t, "{tasks}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("data_source") - assert.True(t, ok) - assert.EqualValues(t, "task_data_source-val", attrVal.Str()) - case "apachedruid.task_slot.blacklisted.count": - assert.False(t, validatedMetrics["apachedruid.task_slot.blacklisted.count"], "Found a duplicate in the metrics slice: apachedruid.task_slot.blacklisted.count") - validatedMetrics["apachedruid.task_slot.blacklisted.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of total task slots in blacklisted Middle Managers and Indexers per emission period. 
This metric is only available if the `TaskSlotCountStatsMonitor` module is included.", ms.At(i).Description()) - assert.Equal(t, "{slots}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("category") - assert.True(t, ok) - assert.EqualValues(t, "taskSlot_category-val", attrVal.Str()) - case "apachedruid.task_slot.idle.count": - assert.False(t, validatedMetrics["apachedruid.task_slot.idle.count"], "Found a duplicate in the metrics slice: apachedruid.task_slot.idle.count") - validatedMetrics["apachedruid.task_slot.idle.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of idle task slots per emission period. This metric is only available if the `TaskSlotCountStatsMonitor` module is included.", ms.At(i).Description()) - assert.Equal(t, "{slots}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("category") - assert.True(t, ok) - assert.EqualValues(t, "taskSlot_category-val", attrVal.Str()) - case "apachedruid.task_slot.lazy.count": - assert.False(t, validatedMetrics["apachedruid.task_slot.lazy.count"], "Found a duplicate in the metrics slice: apachedruid.task_slot.lazy.count") - validatedMetrics["apachedruid.task_slot.lazy.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of total task slots in lazy marked Middle Managers and Indexers per emission period. This metric is only available if the `TaskSlotCountStatsMonitor` module is included.", ms.At(i).Description()) - assert.Equal(t, "{slots}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("category") - assert.True(t, ok) - assert.EqualValues(t, "taskSlot_category-val", attrVal.Str()) - case "apachedruid.task_slot.total.count": - assert.False(t, validatedMetrics["apachedruid.task_slot.total.count"], "Found a duplicate in the metrics slice: apachedruid.task_slot.total.count") - validatedMetrics["apachedruid.task_slot.total.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of total task slots per emission period. 
This metric is only available if the `TaskSlotCountStatsMonitor` module is included.", ms.At(i).Description()) - assert.Equal(t, "{slots}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("category") - assert.True(t, ok) - assert.EqualValues(t, "taskSlot_category-val", attrVal.Str()) - case "apachedruid.task_slot.used.count": - assert.False(t, validatedMetrics["apachedruid.task_slot.used.count"], "Found a duplicate in the metrics slice: apachedruid.task_slot.used.count") - validatedMetrics["apachedruid.task_slot.used.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of busy task slots per emission period. This metric is only available if the `TaskSlotCountStatsMonitor` module is included.", ms.At(i).Description()) - assert.Equal(t, "{slots}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("category") - assert.True(t, ok) - assert.EqualValues(t, "taskSlot_category-val", attrVal.Str()) - case "apachedruid.tier.historical.count": - assert.False(t, validatedMetrics["apachedruid.tier.historical.count"], "Found a duplicate in the metrics slice: apachedruid.tier.historical.count") - validatedMetrics["apachedruid.tier.historical.count"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of available historical nodes in each tier.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("tier") - assert.True(t, ok) - assert.EqualValues(t, "tier-val", attrVal.Str()) - case "apachedruid.tier.replication.factor": - assert.False(t, validatedMetrics["apachedruid.tier.replication.factor"], "Found a duplicate in the metrics slice: apachedruid.tier.replication.factor") - validatedMetrics["apachedruid.tier.replication.factor"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Configured maximum replication factor in each tier.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("tier") - assert.True(t, ok) - assert.EqualValues(t, "tier-val", attrVal.Str()) - case 
"apachedruid.tier.required.capacity": - assert.False(t, validatedMetrics["apachedruid.tier.required.capacity"], "Found a duplicate in the metrics slice: apachedruid.tier.required.capacity") - validatedMetrics["apachedruid.tier.required.capacity"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Total capacity in bytes required in each tier.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("tier") - assert.True(t, ok) - assert.EqualValues(t, "tier-val", attrVal.Str()) - case "apachedruid.tier.total.capacity": - assert.False(t, validatedMetrics["apachedruid.tier.total.capacity"], "Found a duplicate in the metrics slice: apachedruid.tier.total.capacity") - validatedMetrics["apachedruid.tier.total.capacity"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Total capacity in bytes available in each tier.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("tier") - assert.True(t, ok) - assert.EqualValues(t, "tier-val", attrVal.Str()) - case "apachedruid.worker.task.failed.count": - assert.False(t, validatedMetrics["apachedruid.worker.task.failed.count"], "Found a duplicate in the metrics slice: apachedruid.worker.task.failed.count") - validatedMetrics["apachedruid.worker.task.failed.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of failed tasks run on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included, and is only supported for Middle Manager nodes.", ms.At(i).Description()) - assert.Equal(t, "{tasks}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("category") - assert.True(t, ok) - assert.EqualValues(t, "worker_category-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("worker_version") - assert.True(t, ok) - assert.EqualValues(t, "worker_version-val", attrVal.Str()) - case "apachedruid.worker.task.success.count": - assert.False(t, validatedMetrics["apachedruid.worker.task.success.count"], "Found a duplicate in the metrics slice: apachedruid.worker.task.success.count") - validatedMetrics["apachedruid.worker.task.success.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of successful tasks run on the reporting worker per emission period. 
This metric is only available if the `WorkerTaskCountStatsMonitor` module is included, and is only supported for Middle Manager nodes.", ms.At(i).Description()) - assert.Equal(t, "{tasks}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("category") - assert.True(t, ok) - assert.EqualValues(t, "worker_category-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("worker_version") - assert.True(t, ok) - assert.EqualValues(t, "worker_version-val", attrVal.Str()) - case "apachedruid.worker.task_slot.idle.count": - assert.False(t, validatedMetrics["apachedruid.worker.task_slot.idle.count"], "Found a duplicate in the metrics slice: apachedruid.worker.task_slot.idle.count") - validatedMetrics["apachedruid.worker.task_slot.idle.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of idle task slots on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included, and is only supported for Middle Manager nodes.", ms.At(i).Description()) - assert.Equal(t, "{slots}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("category") - assert.True(t, ok) - assert.EqualValues(t, "worker_category-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("worker_version") - assert.True(t, ok) - assert.EqualValues(t, "worker_version-val", attrVal.Str()) - case "apachedruid.worker.task_slot.total.count": - assert.False(t, validatedMetrics["apachedruid.worker.task_slot.total.count"], "Found a duplicate in the metrics slice: apachedruid.worker.task_slot.total.count") - validatedMetrics["apachedruid.worker.task_slot.total.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of total task slots on the reporting worker per emission period. 
This metric is only available if the `WorkerTaskCountStatsMonitor` module is included.", ms.At(i).Description()) - assert.Equal(t, "{slots}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("category") - assert.True(t, ok) - assert.EqualValues(t, "worker_category-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("worker_version") - assert.True(t, ok) - assert.EqualValues(t, "worker_version-val", attrVal.Str()) - case "apachedruid.worker.task_slot.used.count": - assert.False(t, validatedMetrics["apachedruid.worker.task_slot.used.count"], "Found a duplicate in the metrics slice: apachedruid.worker.task_slot.used.count") - validatedMetrics["apachedruid.worker.task_slot.used.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of busy task slots on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included.", ms.At(i).Description()) - assert.Equal(t, "{slots}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityDelta, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("category") - assert.True(t, ok) - assert.EqualValues(t, "worker_category-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("worker_version") - assert.True(t, ok) - assert.EqualValues(t, "worker_version-val", attrVal.Str()) - case "apachedruid.zk.connected": - assert.False(t, validatedMetrics["apachedruid.zk.connected"], "Found a duplicate in the metrics slice: apachedruid.zk.connected") - validatedMetrics["apachedruid.zk.connected"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Indicator of connection status. `1` for connected, `0` for disconnected. Emitted once per monitor period.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "apachedruid.zk.reconnect.time": - assert.False(t, validatedMetrics["apachedruid.zk.reconnect.time"], "Found a duplicate in the metrics slice: apachedruid.zk.reconnect.time") - validatedMetrics["apachedruid.zk.reconnect.time"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Amount of time, in milliseconds, that a server was disconnected from ZooKeeper before reconnecting. Emitted on reconnection. 
Not emitted if connection to ZooKeeper is permanently lost, because in this case, there is no reconnection.", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - } - } - }) - } -} diff --git a/receiver/apachedruidreceiver/internal/metadata/generated_resource.go b/receiver/apachedruidreceiver/internal/metadata/generated_resource.go deleted file mode 100644 index 98fa1566e4515..0000000000000 --- a/receiver/apachedruidreceiver/internal/metadata/generated_resource.go +++ /dev/null @@ -1,50 +0,0 @@ -// Code generated by mdatagen. DO NOT EDIT. - -package metadata - -import ( - "go.opentelemetry.io/collector/pdata/pcommon" -) - -// ResourceBuilder is a helper struct to build resources predefined in metadata.yaml. -// The ResourceBuilder is not thread-safe and must not to be used in multiple goroutines. -type ResourceBuilder struct { - config ResourceAttributesConfig - res pcommon.Resource -} - -// NewResourceBuilder creates a new ResourceBuilder. This method should be called on the start of the application. -func NewResourceBuilder(rac ResourceAttributesConfig) *ResourceBuilder { - return &ResourceBuilder{ - config: rac, - res: pcommon.NewResource(), - } -} - -// SetApachedruidClusterName sets provided value as "apachedruid.cluster.name" attribute. -func (rb *ResourceBuilder) SetApachedruidClusterName(val string) { - if rb.config.ApachedruidClusterName.Enabled { - rb.res.Attributes().PutStr("apachedruid.cluster.name", val) - } -} - -// SetApachedruidNodeHost sets provided value as "apachedruid.node.host" attribute. -func (rb *ResourceBuilder) SetApachedruidNodeHost(val string) { - if rb.config.ApachedruidNodeHost.Enabled { - rb.res.Attributes().PutStr("apachedruid.node.host", val) - } -} - -// SetApachedruidNodeService sets provided value as "apachedruid.node.service" attribute. -func (rb *ResourceBuilder) SetApachedruidNodeService(val string) { - if rb.config.ApachedruidNodeService.Enabled { - rb.res.Attributes().PutStr("apachedruid.node.service", val) - } -} - -// Emit returns the built resource and resets the internal builder state. -func (rb *ResourceBuilder) Emit() pcommon.Resource { - r := rb.res - rb.res = pcommon.NewResource() - return r -} diff --git a/receiver/apachedruidreceiver/internal/metadata/generated_resource_test.go b/receiver/apachedruidreceiver/internal/metadata/generated_resource_test.go deleted file mode 100644 index 12ebc19c48eda..0000000000000 --- a/receiver/apachedruidreceiver/internal/metadata/generated_resource_test.go +++ /dev/null @@ -1,52 +0,0 @@ -// Code generated by mdatagen. DO NOT EDIT. 
- -package metadata - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestResourceBuilder(t *testing.T) { - for _, test := range []string{"default", "all_set", "none_set"} { - t.Run(test, func(t *testing.T) { - cfg := loadResourceAttributesConfig(t, test) - rb := NewResourceBuilder(cfg) - rb.SetApachedruidClusterName("apachedruid.cluster.name-val") - rb.SetApachedruidNodeHost("apachedruid.node.host-val") - rb.SetApachedruidNodeService("apachedruid.node.service-val") - - res := rb.Emit() - assert.Equal(t, 0, rb.Emit().Attributes().Len()) // Second call should return empty Resource - - switch test { - case "default": - assert.Equal(t, 3, res.Attributes().Len()) - case "all_set": - assert.Equal(t, 3, res.Attributes().Len()) - case "none_set": - assert.Equal(t, 0, res.Attributes().Len()) - return - default: - assert.Failf(t, "unexpected test case: %s", test) - } - - val, ok := res.Attributes().Get("apachedruid.cluster.name") - assert.True(t, ok) - if ok { - assert.EqualValues(t, "apachedruid.cluster.name-val", val.Str()) - } - val, ok = res.Attributes().Get("apachedruid.node.host") - assert.True(t, ok) - if ok { - assert.EqualValues(t, "apachedruid.node.host-val", val.Str()) - } - val, ok = res.Attributes().Get("apachedruid.node.service") - assert.True(t, ok) - if ok { - assert.EqualValues(t, "apachedruid.node.service-val", val.Str()) - } - }) - } -} diff --git a/receiver/apachedruidreceiver/internal/metadata/generated_status.go b/receiver/apachedruidreceiver/internal/metadata/generated_status.go index d1ec0263cd299..a4fa56794f550 100644 --- a/receiver/apachedruidreceiver/internal/metadata/generated_status.go +++ b/receiver/apachedruidreceiver/internal/metadata/generated_status.go @@ -14,7 +14,6 @@ var ( const ( MetricsStability = component.StabilityLevelDevelopment - LogsStability = component.StabilityLevelDevelopment ) func Meter(settings component.TelemetrySettings) metric.Meter { diff --git a/receiver/apachedruidreceiver/internal/metadata/package_test.go b/receiver/apachedruidreceiver/internal/metadata/package_test.go deleted file mode 100644 index 1aba5ec4bb0b5..0000000000000 --- a/receiver/apachedruidreceiver/internal/metadata/package_test.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package metadata - -import ( - "testing" - - "go.uber.org/goleak" -) - -func TestMain(m *testing.M) { - goleak.VerifyTestMain(m) -} diff --git a/receiver/apachedruidreceiver/internal/metadata/testdata/config.yaml b/receiver/apachedruidreceiver/internal/metadata/testdata/config.yaml deleted file mode 100644 index 71d65b6a141c0..0000000000000 --- a/receiver/apachedruidreceiver/internal/metadata/testdata/config.yaml +++ /dev/null @@ -1,999 +0,0 @@ -default: -all_set: - metrics: - apachedruid.compact.segment_analyzer.fetch_and_process_millis: - enabled: true - apachedruid.compact.task.count: - enabled: true - apachedruid.compact_task.available_slot.count: - enabled: true - apachedruid.compact_task.max_slot.count: - enabled: true - apachedruid.coordinator.global.time: - enabled: true - apachedruid.coordinator.time: - enabled: true - apachedruid.ingest.bytes.received: - enabled: true - apachedruid.ingest.count: - enabled: true - apachedruid.ingest.events.buffered: - enabled: true - apachedruid.ingest.events.duplicate: - enabled: true - apachedruid.ingest.events.message_gap: - enabled: true - apachedruid.ingest.events.processed: - enabled: true - apachedruid.ingest.events.processed_with_error: - enabled: true - 
apachedruid.ingest.events.thrown_away: - enabled: true - apachedruid.ingest.events.unparseable: - enabled: true - apachedruid.ingest.handoff.count: - enabled: true - apachedruid.ingest.handoff.failed: - enabled: true - apachedruid.ingest.handoff.time: - enabled: true - apachedruid.ingest.input.bytes: - enabled: true - apachedruid.ingest.kafka.avg_lag: - enabled: true - apachedruid.ingest.kafka.lag: - enabled: true - apachedruid.ingest.kafka.max_lag: - enabled: true - apachedruid.ingest.kafka.partition_lag: - enabled: true - apachedruid.ingest.kinesis.avg_lag.time: - enabled: true - apachedruid.ingest.kinesis.lag.time: - enabled: true - apachedruid.ingest.kinesis.max_lag.time: - enabled: true - apachedruid.ingest.kinesis.partition_lag.time: - enabled: true - apachedruid.ingest.merge.cpu: - enabled: true - apachedruid.ingest.merge.time: - enabled: true - apachedruid.ingest.notices.queue_size: - enabled: true - apachedruid.ingest.notices.time: - enabled: true - apachedruid.ingest.pause.time: - enabled: true - apachedruid.ingest.persists.back_pressure: - enabled: true - apachedruid.ingest.persists.count: - enabled: true - apachedruid.ingest.persists.cpu: - enabled: true - apachedruid.ingest.persists.failed: - enabled: true - apachedruid.ingest.persists.time: - enabled: true - apachedruid.ingest.rows.output: - enabled: true - apachedruid.ingest.segments.count: - enabled: true - apachedruid.ingest.shuffle.bytes: - enabled: true - apachedruid.ingest.shuffle.requests: - enabled: true - apachedruid.ingest.sink.count: - enabled: true - apachedruid.ingest.tombstones.count: - enabled: true - apachedruid.interval.compacted.count: - enabled: true - apachedruid.interval.skip_compact.count: - enabled: true - apachedruid.interval.wait_compact.count: - enabled: true - apachedruid.jetty.num_open_connections: - enabled: true - apachedruid.jetty.thread_pool.busy: - enabled: true - apachedruid.jetty.thread_pool.idle: - enabled: true - apachedruid.jetty.thread_pool.is_low_on_threads: - enabled: true - apachedruid.jetty.thread_pool.max: - enabled: true - apachedruid.jetty.thread_pool.min: - enabled: true - apachedruid.jetty.thread_pool.queue_size: - enabled: true - apachedruid.jetty.thread_pool.total: - enabled: true - apachedruid.jvm.bufferpool.capacity: - enabled: true - apachedruid.jvm.bufferpool.count: - enabled: true - apachedruid.jvm.bufferpool.used: - enabled: true - apachedruid.jvm.gc.count: - enabled: true - apachedruid.jvm.gc.cpu: - enabled: true - apachedruid.jvm.mem.committed: - enabled: true - apachedruid.jvm.mem.init: - enabled: true - apachedruid.jvm.mem.max: - enabled: true - apachedruid.jvm.mem.used: - enabled: true - apachedruid.jvm.pool.committed: - enabled: true - apachedruid.jvm.pool.init: - enabled: true - apachedruid.jvm.pool.max: - enabled: true - apachedruid.jvm.pool.used: - enabled: true - apachedruid.kill.pending_segments.count: - enabled: true - apachedruid.kill.task.count: - enabled: true - apachedruid.kill_task.available_slot.count: - enabled: true - apachedruid.kill_task.max_slot.count: - enabled: true - apachedruid.merge_buffer.pending_requests: - enabled: true - apachedruid.metadata.kill.audit.count: - enabled: true - apachedruid.metadata.kill.compaction.count: - enabled: true - apachedruid.metadata.kill.datasource.count: - enabled: true - apachedruid.metadata.kill.rule.count: - enabled: true - apachedruid.metadata.kill.supervisor.count: - enabled: true - apachedruid.metadatacache.init.time: - enabled: true - apachedruid.metadatacache.refresh.count: - enabled: true - 
apachedruid.metadatacache.refresh.time: - enabled: true - apachedruid.query.byte_limit.exceeded.count: - enabled: true - apachedruid.query.bytes: - enabled: true - apachedruid.query.cache.delta.average_bytes: - enabled: true - apachedruid.query.cache.delta.errors: - enabled: true - apachedruid.query.cache.delta.evictions: - enabled: true - apachedruid.query.cache.delta.hit_rate: - enabled: true - apachedruid.query.cache.delta.hits: - enabled: true - apachedruid.query.cache.delta.misses: - enabled: true - apachedruid.query.cache.delta.num_entries: - enabled: true - apachedruid.query.cache.delta.put.error: - enabled: true - apachedruid.query.cache.delta.put.ok: - enabled: true - apachedruid.query.cache.delta.put.oversized: - enabled: true - apachedruid.query.cache.delta.size_bytes: - enabled: true - apachedruid.query.cache.delta.timeouts: - enabled: true - apachedruid.query.cache.memcached.delta: - enabled: true - apachedruid.query.cache.memcached.total: - enabled: true - apachedruid.query.cache.total.average_bytes: - enabled: true - apachedruid.query.cache.total.errors: - enabled: true - apachedruid.query.cache.total.evictions: - enabled: true - apachedruid.query.cache.total.hit_rate: - enabled: true - apachedruid.query.cache.total.hits: - enabled: true - apachedruid.query.cache.total.misses: - enabled: true - apachedruid.query.cache.total.num_entries: - enabled: true - apachedruid.query.cache.total.put.error: - enabled: true - apachedruid.query.cache.total.put.ok: - enabled: true - apachedruid.query.cache.total.put.oversized: - enabled: true - apachedruid.query.cache.total.size_bytes: - enabled: true - apachedruid.query.cache.total.timeouts: - enabled: true - apachedruid.query.count: - enabled: true - apachedruid.query.cpu.time: - enabled: true - apachedruid.query.failed.count: - enabled: true - apachedruid.query.interrupted.count: - enabled: true - apachedruid.query.node.backpressure: - enabled: true - apachedruid.query.node.bytes: - enabled: true - apachedruid.query.node.time: - enabled: true - apachedruid.query.node.ttfb: - enabled: true - apachedruid.query.priority: - enabled: true - apachedruid.query.row_limit.exceeded.count: - enabled: true - apachedruid.query.segment.time: - enabled: true - apachedruid.query.segment_and_cache.time: - enabled: true - apachedruid.query.segments.count: - enabled: true - apachedruid.query.success.count: - enabled: true - apachedruid.query.time: - enabled: true - apachedruid.query.timeout.count: - enabled: true - apachedruid.query.wait.time: - enabled: true - apachedruid.segment.added.bytes: - enabled: true - apachedruid.segment.assign_skipped.count: - enabled: true - apachedruid.segment.assigned.count: - enabled: true - apachedruid.segment.compacted.bytes: - enabled: true - apachedruid.segment.compacted.count: - enabled: true - apachedruid.segment.count: - enabled: true - apachedruid.segment.deleted.count: - enabled: true - apachedruid.segment.drop_queue.count: - enabled: true - apachedruid.segment.drop_skipped.count: - enabled: true - apachedruid.segment.dropped.count: - enabled: true - apachedruid.segment.load_queue.assigned: - enabled: true - apachedruid.segment.load_queue.cancelled: - enabled: true - apachedruid.segment.load_queue.count: - enabled: true - apachedruid.segment.load_queue.failed: - enabled: true - apachedruid.segment.load_queue.size: - enabled: true - apachedruid.segment.load_queue.success: - enabled: true - apachedruid.segment.max: - enabled: true - apachedruid.segment.move_skipped.count: - enabled: true - 
apachedruid.segment.moved.bytes: - enabled: true - apachedruid.segment.moved.count: - enabled: true - apachedruid.segment.nuked.bytes: - enabled: true - apachedruid.segment.over_shadowed.count: - enabled: true - apachedruid.segment.pending_delete: - enabled: true - apachedruid.segment.row_count.avg: - enabled: true - apachedruid.segment.row_count.range.count: - enabled: true - apachedruid.segment.scan.active: - enabled: true - apachedruid.segment.scan.pending: - enabled: true - apachedruid.segment.size: - enabled: true - apachedruid.segment.skip_compact.bytes: - enabled: true - apachedruid.segment.skip_compact.count: - enabled: true - apachedruid.segment.unavailable.count: - enabled: true - apachedruid.segment.under_replicated.count: - enabled: true - apachedruid.segment.unneeded.count: - enabled: true - apachedruid.segment.used: - enabled: true - apachedruid.segment.used_percent: - enabled: true - apachedruid.segment.wait_compact.bytes: - enabled: true - apachedruid.segment.wait_compact.count: - enabled: true - apachedruid.serverview.init.time: - enabled: true - apachedruid.serverview.sync.healthy: - enabled: true - apachedruid.serverview.sync.unstable_time: - enabled: true - apachedruid.sql_query.bytes: - enabled: true - apachedruid.sql_query.planning_time_ms: - enabled: true - apachedruid.sql_query.time: - enabled: true - apachedruid.subquery.byte_limit.count: - enabled: true - apachedruid.subquery.fallback.count: - enabled: true - apachedruid.subquery.fallback.insufficient_type.count: - enabled: true - apachedruid.subquery.fallback.unknown_reason.count: - enabled: true - apachedruid.subquery.row_limit.count: - enabled: true - apachedruid.sys.cpu: - enabled: true - apachedruid.sys.disk.queue: - enabled: true - apachedruid.sys.disk.read.count: - enabled: true - apachedruid.sys.disk.read.size: - enabled: true - apachedruid.sys.disk.transfer_time: - enabled: true - apachedruid.sys.disk.write.count: - enabled: true - apachedruid.sys.disk.write.size: - enabled: true - apachedruid.sys.fs.files.count: - enabled: true - apachedruid.sys.fs.files.free: - enabled: true - apachedruid.sys.fs.max: - enabled: true - apachedruid.sys.fs.used: - enabled: true - apachedruid.sys.la.1: - enabled: true - apachedruid.sys.la.15: - enabled: true - apachedruid.sys.la.5: - enabled: true - apachedruid.sys.mem.free: - enabled: true - apachedruid.sys.mem.max: - enabled: true - apachedruid.sys.mem.used: - enabled: true - apachedruid.sys.net.read.dropped: - enabled: true - apachedruid.sys.net.read.errors: - enabled: true - apachedruid.sys.net.read.packets: - enabled: true - apachedruid.sys.net.read.size: - enabled: true - apachedruid.sys.net.write.collisions: - enabled: true - apachedruid.sys.net.write.errors: - enabled: true - apachedruid.sys.net.write.packets: - enabled: true - apachedruid.sys.net.write.size: - enabled: true - apachedruid.sys.storage.used: - enabled: true - apachedruid.sys.swap.free: - enabled: true - apachedruid.sys.swap.max: - enabled: true - apachedruid.sys.swap.page_in: - enabled: true - apachedruid.sys.swap.page_out: - enabled: true - apachedruid.sys.tcpv4.active_opens: - enabled: true - apachedruid.sys.tcpv4.attempt_fails: - enabled: true - apachedruid.sys.tcpv4.estab_resets: - enabled: true - apachedruid.sys.tcpv4.in.errs: - enabled: true - apachedruid.sys.tcpv4.in.segs: - enabled: true - apachedruid.sys.tcpv4.out.rsts: - enabled: true - apachedruid.sys.tcpv4.out.segs: - enabled: true - apachedruid.sys.tcpv4.passive_opens: - enabled: true - apachedruid.sys.tcpv4.retrans.segs: - enabled: true 
- apachedruid.sys.uptime: - enabled: true - apachedruid.task.action.batch.attempts: - enabled: true - apachedruid.task.action.batch.queue_time: - enabled: true - apachedruid.task.action.batch.run_time: - enabled: true - apachedruid.task.action.batch.size: - enabled: true - apachedruid.task.action.failed.count: - enabled: true - apachedruid.task.action.log.time: - enabled: true - apachedruid.task.action.run.time: - enabled: true - apachedruid.task.action.success.count: - enabled: true - apachedruid.task.failed.count: - enabled: true - apachedruid.task.pending.count: - enabled: true - apachedruid.task.pending.time: - enabled: true - apachedruid.task.run.time: - enabled: true - apachedruid.task.running.count: - enabled: true - apachedruid.task.segment_availability.wait.time: - enabled: true - apachedruid.task.success.count: - enabled: true - apachedruid.task.waiting.count: - enabled: true - apachedruid.task_slot.blacklisted.count: - enabled: true - apachedruid.task_slot.idle.count: - enabled: true - apachedruid.task_slot.lazy.count: - enabled: true - apachedruid.task_slot.total.count: - enabled: true - apachedruid.task_slot.used.count: - enabled: true - apachedruid.tier.historical.count: - enabled: true - apachedruid.tier.replication.factor: - enabled: true - apachedruid.tier.required.capacity: - enabled: true - apachedruid.tier.total.capacity: - enabled: true - apachedruid.worker.task.failed.count: - enabled: true - apachedruid.worker.task.success.count: - enabled: true - apachedruid.worker.task_slot.idle.count: - enabled: true - apachedruid.worker.task_slot.total.count: - enabled: true - apachedruid.worker.task_slot.used.count: - enabled: true - apachedruid.zk.connected: - enabled: true - apachedruid.zk.reconnect.time: - enabled: true - resource_attributes: - apachedruid.cluster.name: - enabled: true - apachedruid.node.host: - enabled: true - apachedruid.node.service: - enabled: true -none_set: - metrics: - apachedruid.compact.segment_analyzer.fetch_and_process_millis: - enabled: false - apachedruid.compact.task.count: - enabled: false - apachedruid.compact_task.available_slot.count: - enabled: false - apachedruid.compact_task.max_slot.count: - enabled: false - apachedruid.coordinator.global.time: - enabled: false - apachedruid.coordinator.time: - enabled: false - apachedruid.ingest.bytes.received: - enabled: false - apachedruid.ingest.count: - enabled: false - apachedruid.ingest.events.buffered: - enabled: false - apachedruid.ingest.events.duplicate: - enabled: false - apachedruid.ingest.events.message_gap: - enabled: false - apachedruid.ingest.events.processed: - enabled: false - apachedruid.ingest.events.processed_with_error: - enabled: false - apachedruid.ingest.events.thrown_away: - enabled: false - apachedruid.ingest.events.unparseable: - enabled: false - apachedruid.ingest.handoff.count: - enabled: false - apachedruid.ingest.handoff.failed: - enabled: false - apachedruid.ingest.handoff.time: - enabled: false - apachedruid.ingest.input.bytes: - enabled: false - apachedruid.ingest.kafka.avg_lag: - enabled: false - apachedruid.ingest.kafka.lag: - enabled: false - apachedruid.ingest.kafka.max_lag: - enabled: false - apachedruid.ingest.kafka.partition_lag: - enabled: false - apachedruid.ingest.kinesis.avg_lag.time: - enabled: false - apachedruid.ingest.kinesis.lag.time: - enabled: false - apachedruid.ingest.kinesis.max_lag.time: - enabled: false - apachedruid.ingest.kinesis.partition_lag.time: - enabled: false - apachedruid.ingest.merge.cpu: - enabled: false - 
-    apachedruid.ingest.merge.time:
-      enabled: false
-    apachedruid.ingest.notices.queue_size:
-      enabled: false
-    apachedruid.ingest.notices.time:
-      enabled: false
-    apachedruid.ingest.pause.time:
-      enabled: false
-    apachedruid.ingest.persists.back_pressure:
-      enabled: false
-    apachedruid.ingest.persists.count:
-      enabled: false
-    apachedruid.ingest.persists.cpu:
-      enabled: false
-    apachedruid.ingest.persists.failed:
-      enabled: false
-    apachedruid.ingest.persists.time:
-      enabled: false
-    apachedruid.ingest.rows.output:
-      enabled: false
-    apachedruid.ingest.segments.count:
-      enabled: false
-    apachedruid.ingest.shuffle.bytes:
-      enabled: false
-    apachedruid.ingest.shuffle.requests:
-      enabled: false
-    apachedruid.ingest.sink.count:
-      enabled: false
-    apachedruid.ingest.tombstones.count:
-      enabled: false
-    apachedruid.interval.compacted.count:
-      enabled: false
-    apachedruid.interval.skip_compact.count:
-      enabled: false
-    apachedruid.interval.wait_compact.count:
-      enabled: false
-    apachedruid.jetty.num_open_connections:
-      enabled: false
-    apachedruid.jetty.thread_pool.busy:
-      enabled: false
-    apachedruid.jetty.thread_pool.idle:
-      enabled: false
-    apachedruid.jetty.thread_pool.is_low_on_threads:
-      enabled: false
-    apachedruid.jetty.thread_pool.max:
-      enabled: false
-    apachedruid.jetty.thread_pool.min:
-      enabled: false
-    apachedruid.jetty.thread_pool.queue_size:
-      enabled: false
-    apachedruid.jetty.thread_pool.total:
-      enabled: false
-    apachedruid.jvm.bufferpool.capacity:
-      enabled: false
-    apachedruid.jvm.bufferpool.count:
-      enabled: false
-    apachedruid.jvm.bufferpool.used:
-      enabled: false
-    apachedruid.jvm.gc.count:
-      enabled: false
-    apachedruid.jvm.gc.cpu:
-      enabled: false
-    apachedruid.jvm.mem.committed:
-      enabled: false
-    apachedruid.jvm.mem.init:
-      enabled: false
-    apachedruid.jvm.mem.max:
-      enabled: false
-    apachedruid.jvm.mem.used:
-      enabled: false
-    apachedruid.jvm.pool.committed:
-      enabled: false
-    apachedruid.jvm.pool.init:
-      enabled: false
-    apachedruid.jvm.pool.max:
-      enabled: false
-    apachedruid.jvm.pool.used:
-      enabled: false
-    apachedruid.kill.pending_segments.count:
-      enabled: false
-    apachedruid.kill.task.count:
-      enabled: false
-    apachedruid.kill_task.available_slot.count:
-      enabled: false
-    apachedruid.kill_task.max_slot.count:
-      enabled: false
-    apachedruid.merge_buffer.pending_requests:
-      enabled: false
-    apachedruid.metadata.kill.audit.count:
-      enabled: false
-    apachedruid.metadata.kill.compaction.count:
-      enabled: false
-    apachedruid.metadata.kill.datasource.count:
-      enabled: false
-    apachedruid.metadata.kill.rule.count:
-      enabled: false
-    apachedruid.metadata.kill.supervisor.count:
-      enabled: false
-    apachedruid.metadatacache.init.time:
-      enabled: false
-    apachedruid.metadatacache.refresh.count:
-      enabled: false
-    apachedruid.metadatacache.refresh.time:
-      enabled: false
-    apachedruid.query.byte_limit.exceeded.count:
-      enabled: false
-    apachedruid.query.bytes:
-      enabled: false
-    apachedruid.query.cache.delta.average_bytes:
-      enabled: false
-    apachedruid.query.cache.delta.errors:
-      enabled: false
-    apachedruid.query.cache.delta.evictions:
-      enabled: false
-    apachedruid.query.cache.delta.hit_rate:
-      enabled: false
-    apachedruid.query.cache.delta.hits:
-      enabled: false
-    apachedruid.query.cache.delta.misses:
-      enabled: false
-    apachedruid.query.cache.delta.num_entries:
-      enabled: false
-    apachedruid.query.cache.delta.put.error:
-      enabled: false
-    apachedruid.query.cache.delta.put.ok:
-      enabled: false
-    apachedruid.query.cache.delta.put.oversized:
-      enabled: false
-    apachedruid.query.cache.delta.size_bytes:
-      enabled: false
-    apachedruid.query.cache.delta.timeouts:
-      enabled: false
-    apachedruid.query.cache.memcached.delta:
-      enabled: false
-    apachedruid.query.cache.memcached.total:
-      enabled: false
-    apachedruid.query.cache.total.average_bytes:
-      enabled: false
-    apachedruid.query.cache.total.errors:
-      enabled: false
-    apachedruid.query.cache.total.evictions:
-      enabled: false
-    apachedruid.query.cache.total.hit_rate:
-      enabled: false
-    apachedruid.query.cache.total.hits:
-      enabled: false
-    apachedruid.query.cache.total.misses:
-      enabled: false
-    apachedruid.query.cache.total.num_entries:
-      enabled: false
-    apachedruid.query.cache.total.put.error:
-      enabled: false
-    apachedruid.query.cache.total.put.ok:
-      enabled: false
-    apachedruid.query.cache.total.put.oversized:
-      enabled: false
-    apachedruid.query.cache.total.size_bytes:
-      enabled: false
-    apachedruid.query.cache.total.timeouts:
-      enabled: false
-    apachedruid.query.count:
-      enabled: false
-    apachedruid.query.cpu.time:
-      enabled: false
-    apachedruid.query.failed.count:
-      enabled: false
-    apachedruid.query.interrupted.count:
-      enabled: false
-    apachedruid.query.node.backpressure:
-      enabled: false
-    apachedruid.query.node.bytes:
-      enabled: false
-    apachedruid.query.node.time:
-      enabled: false
-    apachedruid.query.node.ttfb:
-      enabled: false
-    apachedruid.query.priority:
-      enabled: false
-    apachedruid.query.row_limit.exceeded.count:
-      enabled: false
-    apachedruid.query.segment.time:
-      enabled: false
-    apachedruid.query.segment_and_cache.time:
-      enabled: false
-    apachedruid.query.segments.count:
-      enabled: false
-    apachedruid.query.success.count:
-      enabled: false
-    apachedruid.query.time:
-      enabled: false
-    apachedruid.query.timeout.count:
-      enabled: false
-    apachedruid.query.wait.time:
-      enabled: false
-    apachedruid.segment.added.bytes:
-      enabled: false
-    apachedruid.segment.assign_skipped.count:
-      enabled: false
-    apachedruid.segment.assigned.count:
-      enabled: false
-    apachedruid.segment.compacted.bytes:
-      enabled: false
-    apachedruid.segment.compacted.count:
-      enabled: false
-    apachedruid.segment.count:
-      enabled: false
-    apachedruid.segment.deleted.count:
-      enabled: false
-    apachedruid.segment.drop_queue.count:
-      enabled: false
-    apachedruid.segment.drop_skipped.count:
-      enabled: false
-    apachedruid.segment.dropped.count:
-      enabled: false
-    apachedruid.segment.load_queue.assigned:
-      enabled: false
-    apachedruid.segment.load_queue.cancelled:
-      enabled: false
-    apachedruid.segment.load_queue.count:
-      enabled: false
-    apachedruid.segment.load_queue.failed:
-      enabled: false
-    apachedruid.segment.load_queue.size:
-      enabled: false
-    apachedruid.segment.load_queue.success:
-      enabled: false
-    apachedruid.segment.max:
-      enabled: false
-    apachedruid.segment.move_skipped.count:
-      enabled: false
-    apachedruid.segment.moved.bytes:
-      enabled: false
-    apachedruid.segment.moved.count:
-      enabled: false
-    apachedruid.segment.nuked.bytes:
-      enabled: false
-    apachedruid.segment.over_shadowed.count:
-      enabled: false
-    apachedruid.segment.pending_delete:
-      enabled: false
-    apachedruid.segment.row_count.avg:
-      enabled: false
-    apachedruid.segment.row_count.range.count:
-      enabled: false
-    apachedruid.segment.scan.active:
-      enabled: false
-    apachedruid.segment.scan.pending:
-      enabled: false
-    apachedruid.segment.size:
-      enabled: false
-    apachedruid.segment.skip_compact.bytes:
-      enabled: false
-    apachedruid.segment.skip_compact.count:
-      enabled: false
-    apachedruid.segment.unavailable.count:
-      enabled: false
-    apachedruid.segment.under_replicated.count:
-      enabled: false
-    apachedruid.segment.unneeded.count:
-      enabled: false
-    apachedruid.segment.used:
-      enabled: false
-    apachedruid.segment.used_percent:
-      enabled: false
-    apachedruid.segment.wait_compact.bytes:
-      enabled: false
-    apachedruid.segment.wait_compact.count:
-      enabled: false
-    apachedruid.serverview.init.time:
-      enabled: false
-    apachedruid.serverview.sync.healthy:
-      enabled: false
-    apachedruid.serverview.sync.unstable_time:
-      enabled: false
-    apachedruid.sql_query.bytes:
-      enabled: false
-    apachedruid.sql_query.planning_time_ms:
-      enabled: false
-    apachedruid.sql_query.time:
-      enabled: false
-    apachedruid.subquery.byte_limit.count:
-      enabled: false
-    apachedruid.subquery.fallback.count:
-      enabled: false
-    apachedruid.subquery.fallback.insufficient_type.count:
-      enabled: false
-    apachedruid.subquery.fallback.unknown_reason.count:
-      enabled: false
-    apachedruid.subquery.row_limit.count:
-      enabled: false
-    apachedruid.sys.cpu:
-      enabled: false
-    apachedruid.sys.disk.queue:
-      enabled: false
-    apachedruid.sys.disk.read.count:
-      enabled: false
-    apachedruid.sys.disk.read.size:
-      enabled: false
-    apachedruid.sys.disk.transfer_time:
-      enabled: false
-    apachedruid.sys.disk.write.count:
-      enabled: false
-    apachedruid.sys.disk.write.size:
-      enabled: false
-    apachedruid.sys.fs.files.count:
-      enabled: false
-    apachedruid.sys.fs.files.free:
-      enabled: false
-    apachedruid.sys.fs.max:
-      enabled: false
-    apachedruid.sys.fs.used:
-      enabled: false
-    apachedruid.sys.la.1:
-      enabled: false
-    apachedruid.sys.la.15:
-      enabled: false
-    apachedruid.sys.la.5:
-      enabled: false
-    apachedruid.sys.mem.free:
-      enabled: false
-    apachedruid.sys.mem.max:
-      enabled: false
-    apachedruid.sys.mem.used:
-      enabled: false
-    apachedruid.sys.net.read.dropped:
-      enabled: false
-    apachedruid.sys.net.read.errors:
-      enabled: false
-    apachedruid.sys.net.read.packets:
-      enabled: false
-    apachedruid.sys.net.read.size:
-      enabled: false
-    apachedruid.sys.net.write.collisions:
-      enabled: false
-    apachedruid.sys.net.write.errors:
-      enabled: false
-    apachedruid.sys.net.write.packets:
-      enabled: false
-    apachedruid.sys.net.write.size:
-      enabled: false
-    apachedruid.sys.storage.used:
-      enabled: false
-    apachedruid.sys.swap.free:
-      enabled: false
-    apachedruid.sys.swap.max:
-      enabled: false
-    apachedruid.sys.swap.page_in:
-      enabled: false
-    apachedruid.sys.swap.page_out:
-      enabled: false
-    apachedruid.sys.tcpv4.active_opens:
-      enabled: false
-    apachedruid.sys.tcpv4.attempt_fails:
-      enabled: false
-    apachedruid.sys.tcpv4.estab_resets:
-      enabled: false
-    apachedruid.sys.tcpv4.in.errs:
-      enabled: false
-    apachedruid.sys.tcpv4.in.segs:
-      enabled: false
-    apachedruid.sys.tcpv4.out.rsts:
-      enabled: false
-    apachedruid.sys.tcpv4.out.segs:
-      enabled: false
-    apachedruid.sys.tcpv4.passive_opens:
-      enabled: false
-    apachedruid.sys.tcpv4.retrans.segs:
-      enabled: false
-    apachedruid.sys.uptime:
-      enabled: false
-    apachedruid.task.action.batch.attempts:
-      enabled: false
-    apachedruid.task.action.batch.queue_time:
-      enabled: false
-    apachedruid.task.action.batch.run_time:
-      enabled: false
-    apachedruid.task.action.batch.size:
-      enabled: false
-    apachedruid.task.action.failed.count:
-      enabled: false
-    apachedruid.task.action.log.time:
-      enabled: false
-    apachedruid.task.action.run.time:
-      enabled: false
-    apachedruid.task.action.success.count:
-      enabled: false
-    apachedruid.task.failed.count:
-      enabled: false
-    apachedruid.task.pending.count:
-      enabled: false
-    apachedruid.task.pending.time:
-      enabled: false
-    apachedruid.task.run.time:
-      enabled: false
-    apachedruid.task.running.count:
-      enabled: false
-    apachedruid.task.segment_availability.wait.time:
-      enabled: false
-    apachedruid.task.success.count:
-      enabled: false
-    apachedruid.task.waiting.count:
-      enabled: false
-    apachedruid.task_slot.blacklisted.count:
-      enabled: false
-    apachedruid.task_slot.idle.count:
-      enabled: false
-    apachedruid.task_slot.lazy.count:
-      enabled: false
-    apachedruid.task_slot.total.count:
-      enabled: false
-    apachedruid.task_slot.used.count:
-      enabled: false
-    apachedruid.tier.historical.count:
-      enabled: false
-    apachedruid.tier.replication.factor:
-      enabled: false
-    apachedruid.tier.required.capacity:
-      enabled: false
-    apachedruid.tier.total.capacity:
-      enabled: false
-    apachedruid.worker.task.failed.count:
-      enabled: false
-    apachedruid.worker.task.success.count:
-      enabled: false
-    apachedruid.worker.task_slot.idle.count:
-      enabled: false
-    apachedruid.worker.task_slot.total.count:
-      enabled: false
-    apachedruid.worker.task_slot.used.count:
-      enabled: false
-    apachedruid.zk.connected:
-      enabled: false
-    apachedruid.zk.reconnect.time:
-      enabled: false
-  resource_attributes:
-    apachedruid.cluster.name:
-      enabled: false
-    apachedruid.node.host:
-      enabled: false
-    apachedruid.node.service:
-      enabled: false
diff --git a/receiver/apachedruidreceiver/metadata.yaml b/receiver/apachedruidreceiver/metadata.yaml
index 18c0c82067736..3c213ecee5558 100644
--- a/receiver/apachedruidreceiver/metadata.yaml
+++ b/receiver/apachedruidreceiver/metadata.yaml
@@ -3,2539 +3,7 @@ type: apachedruid
 status:
   class: receiver
   stability:
-    development: [metrics, logs]
+    development: [metrics]
   distributions: []
   codeowners:
     active: [atoulme, yuanlihan]
-
-resource_attributes:
-  apachedruid.cluster.name:
-    description: The name of the apachedruid cluster.
-    type: string
-    enabled: true
-  apachedruid.node.host:
-    description: The name of the apachedruid node.
-    type: string
-    enabled: true
-  apachedruid.node.service:
-    description: The service name of the apachedruid node.
-    type: string
-    enabled: true
-attributes:
-  query_data_source:
-    name_override: data_source
-    description: The data source name of the query.
-    type: string
-  query_num_metrics:
-    name_override: num_metrics
-    description: The number of metrics of the query.
-    type: string
-  query_dimension:
-    name_override: dimension
-    description: The dimension of the query.
-    type: string
-  query_has_filters:
-    name_override: has_filters
-    description: Whether query has filters.
-    type: string
-  query_threshold:
-    name_override: threshold
-    description: The threshold of query.
-    type: int
-  query_num_complex_metrics:
-    name_override: num_complex_metrics
-    description: The number of complex metrics.
-    type: int
-  query_type:
-    name_override: type
-    description: The type of query.
-    type: string
-  query_remote_address:
-    name_override: remote_address
-    description: The remote address of the query.
-    type: string
-  query_id:
-    name_override: id
-    description: The id of query.
-    type: string
-  query_context:
-    name_override: context
-    description: The context of the query.
-    type: string
-  query_num_dimensions:
-    name_override: num_dimensions
-    description: The number of dimensions of query.
-    type: string
-  query_interval:
-    name_override: interval
-    description: The interval of the query.
-    type: string
-  query_duration:
-    name_override: duration
-    description: The duration of query.
-    type: string
-  query_status:
-    name_override: status
-    description: The status of the query.
- type: string - query_server: - name_override: server - description: The server of the query. - type: string - query_lane: - name_override: lane - description: The name of query lane. - type: string - sqlQuery_data_source: - name_override: data_source - description: The data source name of the query. - type: string - sqlQuery_native_query_ids: - name_override: native_query_ids - description: The native query ids of sql query. - type: string - sqlQuery_engine: - name_override: engine - description: The engine name of the sql query. - type: string - sqlQuery_remote_address: - name_override: remote_address - description: The remote address of sql query. - type: string - sqlQuery_id: - name_override: id - description: The id of sql query. - type: string - sqlQuery_success: - name_override: success - description: Whether sql query is successful. - type: string - serverview_tier: - name_override: tier - description: The name of the tier. - type: string - serverview_server: - name_override: server - description: The address of server. - type: string - query_segment: - name_override: segment - description: The segment of the query. - type: string - query_vectorized: - name_override: vectorized - description: Whether query is vectorized. - type: string - ingest_task_type: - name_override: task_type - description: The type of ingestion task. - type: string - ingest_data_source: - name_override: data_source - description: The data source of ingestion task. - type: string - ingest_group_id: - name_override: group_id - description: The ingestion group id. - type: string - ingest_tags: - name_override: tags - description: The names of tags. - type: string - ingest_task_id: - name_override: task_id - description: The id of the task. - type: string - ingest_task_ingestion_mode: - name_override: task_ingestion_mode - description: The mode of ingestion task. - type: string - ingest_stream: - name_override: stream - description: The name of stream to ingest. - type: string - ingest_partition: - name_override: partition - description: The partition of the topic. - type: string - compact_task_type: - name_override: task_type - description: The type of task. - type: string - compact_data_source: - name_override: data_source - description: The data source of compaction task. - type: string - compact_group_id: - name_override: group_id - description: The group id of compaction task. - type: string - compact_tags: - name_override: tags - description: The tags of the compaction task. - type: string - compact_task_id: - name_override: task_id - description: The task id of compaction task. - type: string - task_type: - name_override: task_type - description: The type of task. - type: string - task_data_source: - name_override: data_source - description: The data source of the task. - type: string - task_group_id: - name_override: group_id - description: The group id of the task. - type: string - task_status: - name_override: task_status - description: The status of the task. - type: string - task_tags: - name_override: tags - description: The tags of task. - type: string - task_id: - name_override: task_id - description: The id of task. - type: string - task_action_type: - name_override: task_action_type - description: The action type of task. - type: string - task_interval: - name_override: interval - description: The interval of task. - type: string - task_segment_availability_confirmed: - name_override: segment_availability_confirmed - description: Whether segment availability is confirmed. 
- type: string - segment_task_type: - name_override: task_type - description: The task type of the segment. - type: string - segment_data_source: - name_override: data_source - description: The data source of the segment. - type: string - segment_group_id: - name_override: group_id - description: The group id of segment. - type: string - segment_tags: - name_override: tags - description: The tags of the segment. - type: string - segment_task_id: - name_override: task_id - description: The task id of segment. - type: string - segment_interval: - name_override: interval - description: The interval of segment. - type: string - taskSlot_category: - name_override: category - description: The category of task slot. - type: string - worker_category: - name_override: category - description: The category of worker. - type: string - worker_version: - name_override: worker_version - description: The verson of worker. - type: string - ingest_supervisor_task_id: - name_override: supervisor_task_id - description: The task id of supervisor. - type: string - segment_tier: - name_override: tier - description: The name of segment tier. - type: string - segment_description: - name_override: description - description: The description of segment. - type: string - segment_server: - name_override: server - description: The server of the segment. - type: string - segment_priority: - name_override: priority - description: The priority of segment. - type: string - tier: - name_override: tier - description: The name of tier. - type: string - kill_data_source: - name_override: data_source - description: The data source name of the kill task. - type: string - interval_data_source: - name_override: data_source - description: The interval of data source. - type: string - coordinator_duty: - name_override: duty - description: The name of coordinator duty task. - type: string - coordinator_duty_group: - name_override: duty_group - description: The name of the duty group. - type: string - segment_range: - name_override: range - description: The range of segment. - type: string - jvm_pool_name: - name_override: pool_name - description: The name of the pool. - type: string - jvm_pool_kind: - name_override: pool_kind - description: The pool kind of jvm. - type: string - jvm_bufferpool_name: - name_override: bufferpool_name - description: The name of buffer pool. - type: string - jvm_mem_kind: - name_override: mem_kind - description: The memory kind of jvm. - type: string - jvm_gc_gen: - name_override: gc_gen - description: The name of GC generation. - type: string - jvm_gc_name: - name_override: gc_name - description: The gc name of jvm. - type: string - ingest_service_name: - name_override: service_name - description: The name of ingestion service. - type: string - ingest_buffer_capacity: - name_override: buffer_capacity - description: The capacity of ingestion buffer. - type: string - sys_disk_name: - name_override: disk_name - description: The name of disk. - type: string - sys_net_hwaddr: - name_override: net_hwaddr - description: The net hardware address. - type: string - sys_net_name: - name_override: net_name - description: The name of network. - type: string - sys_net_address: - name_override: net_address - description: The net address. - type: string - sys_fs_dir_name: - name_override: fs_dir_name - description: The dir name. - type: string - sys_fs_dev_name: - name_override: fs_dev_name - description: The dev name. 
- type: string - sys_cpu_time: - name_override: cpu_time - description: The group name of cpu time usage. - type: string - sys_cpu_name: - name_override: cpu_name - description: The group name of cpu usage. - type: string -metrics: - apachedruid.query.time: - description: Milliseconds taken to complete a query. - unit: ms - gauge: - value_type: int - attributes: - [ - query_data_source, - query_num_metrics, - query_dimension, - query_has_filters, - query_threshold, - query_num_complex_metrics, - query_type, - query_remote_address, - query_id, - query_context, - query_num_dimensions, - query_interval, - query_duration, - ] - enabled: true - apachedruid.query.bytes: - description: The total number of bytes returned to the requesting client in the query response from the broker. Other services report the total bytes for their portion of the query. - unit: By - gauge: - value_type: int - attributes: - [ - query_data_source, - query_num_metrics, - query_dimension, - query_has_filters, - query_threshold, - query_num_complex_metrics, - query_type, - query_remote_address, - query_id, - query_context, - query_num_dimensions, - query_interval, - query_duration, - ] - enabled: true - apachedruid.query.node.time: - description: Milliseconds taken to query individual historical/realtime processes. - unit: ms - gauge: - value_type: int - attributes: [query_status, query_server, query_id] - enabled: true - apachedruid.query.node.bytes: - description: Number of bytes returned from querying individual historical/realtime processes. - unit: By - gauge: - value_type: int - attributes: [query_status, query_server, query_id] - enabled: true - apachedruid.query.node.ttfb: - description: Time to first byte. Milliseconds elapsed until Broker starts receiving the response from individual historical/realtime processes. - unit: ms - gauge: - value_type: int - attributes: [query_status, query_server, query_id] - enabled: true - apachedruid.query.node.backpressure: - description: Milliseconds that the channel to this process has spent suspended due to backpressure. - unit: ms - gauge: - value_type: int - attributes: [query_status, query_server, query_id] - enabled: true - apachedruid.query.count: - description: Number of total queries. - unit: "{queries}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [] - enabled: true - apachedruid.query.success.count: - description: Number of queries successfully processed. - unit: "{queries}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [] - enabled: true - apachedruid.query.failed.count: - description: Number of failed queries. - unit: "{queries}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [] - enabled: true - apachedruid.query.interrupted.count: - description: Number of queries interrupted due to cancellation. - unit: "{queries}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [] - enabled: true - apachedruid.query.timeout.count: - description: Number of timed out queries. - unit: "{queries}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [] - enabled: true - apachedruid.merge_buffer.pending_requests: - description: Number of requests waiting to acquire a batch of buffers from the merge buffer pool. 
- unit: "{requests}" - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.query.segments.count: - description: This metric is not enabled by default. See the `QueryMetrics` Interface for reference regarding enabling this metric. Number of segments that will be touched by the query. In the broker, it makes a plan to distribute the query to realtime tasks and historicals based on a snapshot of segment distribution state. If there are some segments moved after this snapshot is created, certain historicals and realtime tasks can report those segments as missing to the broker. The broker will resend the query to the new servers that serve those segments after move. In this case, those segments can be counted more than once in this metric. - unit: "{segments}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [] - enabled: true - apachedruid.query.priority: - description: Assigned lane and priority, only if Laning strategy is enabled. Refer to [Laning strategies](https,//druid.apache.org/docs/latest/configuration#laning-strategies). - unit: 1 - gauge: - value_type: int - attributes: [query_type, query_data_source, query_lane] - enabled: true - apachedruid.sql_query.time: - description: Milliseconds taken to complete a SQL query. - unit: ms - gauge: - value_type: int - attributes: - [ - sqlQuery_data_source, - sqlQuery_native_query_ids, - sqlQuery_engine, - sqlQuery_remote_address, - sqlQuery_id, - sqlQuery_success, - ] - enabled: true - apachedruid.sql_query.planning_time_ms: - description: Milliseconds taken to plan a SQL to native query. - unit: ms - gauge: - value_type: int - attributes: - [ - sqlQuery_data_source, - sqlQuery_native_query_ids, - sqlQuery_engine, - sqlQuery_remote_address, - sqlQuery_id, - sqlQuery_success, - ] - enabled: true - apachedruid.sql_query.bytes: - description: Number of bytes returned in the SQL query response. - unit: By - gauge: - value_type: int - attributes: - [ - sqlQuery_data_source, - sqlQuery_native_query_ids, - sqlQuery_engine, - sqlQuery_remote_address, - sqlQuery_id, - sqlQuery_success, - ] - enabled: true - apachedruid.serverview.init.time: - description: Time taken to initialize the broker server view. Useful to detect if brokers are taking too long to start. - unit: ms - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.metadatacache.init.time: - description: Time taken to initialize the broker segment metadata cache. Useful to detect if brokers are taking too long to start. - unit: ms - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.metadatacache.refresh.count: - description: Number of segments to refresh in broker segment metadata cache. - unit: 1 - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.metadatacache.refresh.time: - description: Time taken to refresh segments in broker segment metadata cache. - unit: ms - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.serverview.sync.healthy: - description: Sync status of the Broker with a segment-loading server such as a Historical or Peon. Emitted only when [HTTP-based server view](https,//druid.apache.org/docs/latest/configuration#segment-management) is enabled. This metric can be used in conjunction with `serverview/sync/unstableTime` to debug slow startup of Brokers. 
- unit: 1 - gauge: - value_type: int - attributes: [serverview_tier, serverview_server] - enabled: true - apachedruid.serverview.sync.unstable_time: - description: Time in milliseconds for which the Broker has been failing to sync with a segment-loading server. Emitted only when [HTTP-based server view](https,//druid.apache.org/docs/latest/configuration#segment-management) is enabled. - unit: ms - gauge: - value_type: int - attributes: [serverview_tier, serverview_server] - enabled: true - apachedruid.subquery.row_limit.count: - description: Number of subqueries whose results are materialized as rows (Java objects on heap). - unit: "{subqueries}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [] - enabled: true - apachedruid.subquery.byte_limit.count: - description: Number of subqueries whose results are materialized as frames (Druid's internal byte representation of rows). - unit: "{subqueries}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [] - enabled: true - apachedruid.subquery.fallback.count: - description: Number of subqueries which cannot be materialized as frames. - unit: "{subqueries}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [] - enabled: true - apachedruid.subquery.fallback.insufficient_type.count: - description: Number of subqueries which cannot be materialized as frames due to insufficient type information in the row signature. - unit: "{subqueries}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [] - enabled: true - apachedruid.subquery.fallback.unknown_reason.count: - description: Number of subqueries which cannot be materialized as frames due other reasons. - unit: "{subqueries}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [] - enabled: true - apachedruid.query.row_limit.exceeded.count: - description: Number of queries whose inlined subquery results exceeded the given row limit. - unit: "{queries}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [] - enabled: true - apachedruid.query.byte_limit.exceeded.count: - description: Number of queries whose inlined subquery results exceeded the given byte limit. - unit: "{queries}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [] - enabled: true - apachedruid.query.segment.time: - description: Milliseconds taken to query individual segment. Includes time to page in the segment from disk. - unit: ms - gauge: - value_type: int - attributes: [query_status, query_segment, query_id, query_vectorized] - enabled: true - apachedruid.query.wait.time: - description: Milliseconds spent waiting for a segment to be scanned. - unit: ms - gauge: - value_type: int - attributes: [query_segment, query_id] - enabled: true - apachedruid.segment.scan.pending: - description: Number of segments in queue waiting to be scanned. - unit: "{segments}" - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.segment.scan.active: - description: Number of segments currently scanned. This metric also indicates how many threads from `druid.processing.numThreads` are currently being used. - unit: "{segments}" - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.query.segment_and_cache.time: - description: Milliseconds taken to query individual segment or hit the cache (if it is enabled on the Historical process). 
- unit: ms - gauge: - value_type: int - attributes: [query_segment, query_id] - enabled: true - apachedruid.query.cpu.time: - description: Microseconds of CPU time taken to complete a query. - unit: ms - gauge: - value_type: int - attributes: - [ - query_data_source, - query_num_metrics, - query_dimension, - query_has_filters, - query_threshold, - query_num_complex_metrics, - query_type, - query_remote_address, - query_id, - query_context, - query_num_dimensions, - query_interval, - query_duration, - ] - enabled: true - apachedruid.jetty.num_open_connections: - description: Number of open jetty connections. - unit: "{connections}" - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.jetty.thread_pool.total: - description: Number of total workable threads allocated. - unit: "{threads}" - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.jetty.thread_pool.idle: - description: Number of idle threads. - unit: "{threads}" - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.jetty.thread_pool.busy: - description: Number of busy threads that has work to do from the worker queue. - unit: "{threads}" - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.jetty.thread_pool.is_low_on_threads: - description: A rough indicator of whether number of total workable threads allocated is enough to handle the works in the work queue. - unit: "{threads}" - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.jetty.thread_pool.min: - description: Number of minimum threads allocatable. - unit: "{threads}" - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.jetty.thread_pool.max: - description: Number of maximum threads allocatable. - unit: "{threads}" - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.jetty.thread_pool.queue_size: - description: Size of the worker queue. - unit: 1 - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.query.cache.delta.num_entries: - description: Number of cache entries. - unit: "{entries}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [] - enabled: true - apachedruid.query.cache.total.num_entries: - description: Number of cache entries. - unit: "{entries}" - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.query.cache.delta.size_bytes: - description: Size in bytes of cache entries. - unit: By - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [] - enabled: true - apachedruid.query.cache.total.size_bytes: - description: Size in bytes of cache entries. - unit: By - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.query.cache.delta.hits: - description: Number of cache hits. - unit: "{hits}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [] - enabled: true - apachedruid.query.cache.total.hits: - description: Number of cache hits. - unit: "{hits}" - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.query.cache.delta.misses: - description: Number of cache misses. - unit: "{misses}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [] - enabled: true - apachedruid.query.cache.total.misses: - description: Number of cache misses. - unit: "{misses}" - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.query.cache.delta.evictions: - description: Number of cache evictions. 
- unit: "{evictions}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [] - enabled: true - apachedruid.query.cache.total.evictions: - description: Number of cache evictions. - unit: "{evictions}" - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.query.cache.delta.hit_rate: - description: Cache hit rate. - unit: 1.0 - sum: - monotonic: true - aggregation_temporality: delta - value_type: double - attributes: [] - enabled: true - apachedruid.query.cache.total.hit_rate: - description: Cache hit rate. - unit: 1.0 - gauge: - value_type: double - attributes: [] - enabled: true - apachedruid.query.cache.delta.average_bytes: - description: Average cache entry byte size. - unit: By - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [] - enabled: true - apachedruid.query.cache.total.average_bytes: - description: Average cache entry byte size. - unit: By - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.query.cache.delta.timeouts: - description: Number of cache timeouts. - unit: "{timeouts}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [] - enabled: true - apachedruid.query.cache.total.timeouts: - description: Number of cache timeouts. - unit: "{timeouts}" - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.query.cache.delta.errors: - description: Number of cache errors. - unit: "{errors}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [] - enabled: true - apachedruid.query.cache.total.errors: - description: Number of cache errors. - unit: "{errors}" - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.query.cache.delta.put.ok: - description: Number of new cache entries successfully cached. - unit: 1 - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [] - enabled: true - apachedruid.query.cache.total.put.ok: - description: Number of new cache entries successfully cached. - unit: 1 - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.query.cache.delta.put.error: - description: Number of new cache entries that could not be cached due to errors. - unit: "{errors}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [] - enabled: true - apachedruid.query.cache.total.put.error: - description: Number of new cache entries that could not be cached due to errors. - unit: "{errors}" - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.query.cache.delta.put.oversized: - description: Number of potential new cache entries that were skipped due to being too large (based on `druid.{broker,historical,realtime}.cache.maxEntrySize` properties). - unit: 1 - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [] - enabled: true - apachedruid.query.cache.total.put.oversized: - description: Number of potential new cache entries that were skipped due to being too large (based on `druid.{broker,historical,realtime}.cache.maxEntrySize` properties). - unit: 1 - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.query.cache.memcached.total: - description: Cache metrics unique to memcached (only if `druid.cache.type=memcached`) as their actual values. 
- unit: 1 - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.query.cache.memcached.delta: - description: Cache metrics unique to memcached (only if `druid.cache.type=memcached`) as their delta from the prior event emission. - unit: 1 - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [] - enabled: true - apachedruid.ingest.count: - description: Count of `1` every time an ingestion job runs (includes compaction jobs). Aggregate using dimensions. - unit: 1 - gauge: - value_type: int - attributes: - [ - ingest_task_type, - ingest_data_source, - ingest_group_id, - ingest_tags, - ingest_task_id, - ingest_task_ingestion_mode, - ] - enabled: true - apachedruid.ingest.segments.count: - description: Count of final segments created by job (includes tombstones). - unit: 1 - gauge: - value_type: int - attributes: - [ - ingest_task_type, - ingest_data_source, - ingest_group_id, - ingest_tags, - ingest_task_id, - ingest_task_ingestion_mode, - ] - enabled: true - apachedruid.ingest.tombstones.count: - description: Count of tombstones created by job. - unit: 1 - gauge: - value_type: int - attributes: - [ - ingest_task_type, - ingest_data_source, - ingest_group_id, - ingest_tags, - ingest_task_id, - ingest_task_ingestion_mode, - ] - enabled: true - apachedruid.ingest.kafka.lag: - description: Total lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers across all partitions. Minimum emission period for this metric is a minute. - unit: 1 - gauge: - value_type: int - attributes: [ingest_tags, ingest_stream, ingest_data_source] - enabled: true - apachedruid.ingest.kafka.max_lag: - description: Max lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers across all partitions. Minimum emission period for this metric is a minute. - unit: 1 - gauge: - value_type: int - attributes: [ingest_tags, ingest_stream, ingest_data_source] - enabled: true - apachedruid.ingest.kafka.avg_lag: - description: Average lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers across all partitions. Minimum emission period for this metric is a minute. - unit: 1 - gauge: - value_type: int - attributes: [ingest_tags, ingest_stream, ingest_data_source] - enabled: true - apachedruid.ingest.kafka.partition_lag: - description: Partition-wise lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers. Minimum emission period for this metric is a minute. - unit: 1 - gauge: - value_type: int - attributes: - [ingest_tags, ingest_partition, ingest_stream, ingest_data_source] - enabled: true - apachedruid.ingest.kinesis.lag.time: - description: Total lag time in milliseconds between the current message sequence number consumed by the Kinesis indexing tasks and latest sequence number in Kinesis across all shards. Minimum emission period for this metric is a minute. - unit: ms - gauge: - value_type: int - attributes: [ingest_tags, ingest_stream, ingest_data_source] - enabled: true - apachedruid.ingest.kinesis.max_lag.time: - description: Max lag time in milliseconds between the current message sequence number consumed by the Kinesis indexing tasks and latest sequence number in Kinesis across all shards. Minimum emission period for this metric is a minute. 
- unit: ms - gauge: - value_type: int - attributes: [ingest_tags, ingest_stream, ingest_data_source] - enabled: true - apachedruid.ingest.kinesis.avg_lag.time: - description: Average lag time in milliseconds between the current message sequence number consumed by the Kinesis indexing tasks and latest sequence number in Kinesis across all shards. Minimum emission period for this metric is a minute. - unit: ms - gauge: - value_type: int - attributes: [ingest_tags, ingest_stream, ingest_data_source] - enabled: true - apachedruid.ingest.kinesis.partition_lag.time: - description: Partition-wise lag time in milliseconds between the current message sequence number consumed by the Kinesis indexing tasks and latest sequence number in Kinesis. Minimum emission period for this metric is a minute. - unit: ms - gauge: - value_type: int - attributes: - [ingest_tags, ingest_partition, ingest_stream, ingest_data_source] - enabled: true - apachedruid.compact.segment_analyzer.fetch_and_process_millis: - description: Time taken to fetch and process segments to infer the schema for the compaction task to run. - unit: 1 - gauge: - value_type: int - attributes: - [ - compact_task_type, - compact_data_source, - compact_group_id, - compact_tags, - compact_task_id, - ] - enabled: true - apachedruid.ingest.events.processed: - description: Number of events processed per emission period. - unit: "{events}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: - [ - ingest_task_type, - ingest_data_source, - ingest_group_id, - ingest_tags, - ingest_task_id, - ] - enabled: true - apachedruid.ingest.events.processed_with_error: - description: Number of events processed with some partial errors per emission period. Events processed with partial errors are counted towards both this metric and `ingest/events/processed`. - unit: "{events}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: - [ - ingest_task_type, - ingest_data_source, - ingest_group_id, - ingest_tags, - ingest_task_id, - ] - enabled: true - apachedruid.ingest.events.unparseable: - description: Number of events rejected because the events are unparseable. - unit: "{events}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: - [ - ingest_task_type, - ingest_data_source, - ingest_group_id, - ingest_tags, - ingest_task_id, - ] - enabled: true - apachedruid.ingest.events.thrown_away: - description: Number of events rejected because they are null, or filtered by `transformSpec`, or outside one of `lateMessageRejectionPeriod`, `earlyMessageRejectionPeriod`, or `windowPeriod`. - unit: "{events}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: - [ - ingest_task_type, - ingest_data_source, - ingest_group_id, - ingest_tags, - ingest_task_id, - ] - enabled: true - apachedruid.ingest.events.duplicate: - description: Number of events rejected because the events are duplicated. - unit: "{events}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: - [ - ingest_task_type, - ingest_data_source, - ingest_group_id, - ingest_tags, - ingest_task_id, - ] - enabled: true - apachedruid.ingest.input.bytes: - description: Number of bytes read from input sources, after decompression but prior to parsing. This covers all data read, including data that does not end up being fully processed and ingested. 
For example, this includes data that ends up being rejected for being unparseable or filtered out. - unit: By - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: - [ - ingest_task_type, - ingest_data_source, - ingest_group_id, - ingest_tags, - ingest_task_id, - ] - enabled: true - apachedruid.ingest.rows.output: - description: Number of Druid rows persisted. - unit: "{rows}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: - [ingest_task_type, ingest_task_id, ingest_data_source, ingest_group_id] - enabled: true - apachedruid.ingest.persists.count: - description: Number of times persist occurred. - unit: 1 - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: - [ - ingest_task_type, - ingest_data_source, - ingest_group_id, - ingest_tags, - ingest_task_id, - ] - enabled: true - apachedruid.ingest.persists.time: - description: Milliseconds spent doing intermediate persist. - unit: ms - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: - [ - ingest_task_type, - ingest_data_source, - ingest_group_id, - ingest_tags, - ingest_task_id, - ] - enabled: true - apachedruid.ingest.persists.cpu: - description: CPU time in nanoseconds spent on doing intermediate persist. - unit: ns - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: - [ - ingest_task_type, - ingest_data_source, - ingest_group_id, - ingest_tags, - ingest_task_id, - ] - enabled: true - apachedruid.ingest.persists.back_pressure: - description: Milliseconds spent creating persist tasks and blocking waiting for them to finish. - unit: ms - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: - [ - ingest_task_type, - ingest_data_source, - ingest_group_id, - ingest_tags, - ingest_task_id, - ] - enabled: true - apachedruid.ingest.persists.failed: - description: Number of persists that failed. - unit: "{persists}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: - [ - ingest_task_type, - ingest_data_source, - ingest_group_id, - ingest_tags, - ingest_task_id, - ] - enabled: true - apachedruid.ingest.handoff.failed: - description: Number of handoffs that failed. - unit: "{handoffs}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: - [ - ingest_task_type, - ingest_data_source, - ingest_group_id, - ingest_tags, - ingest_task_id, - ] - enabled: true - apachedruid.ingest.merge.time: - description: Milliseconds spent merging intermediate segments. - unit: ms - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: - [ - ingest_task_type, - ingest_data_source, - ingest_group_id, - ingest_tags, - ingest_task_id, - ] - enabled: true - apachedruid.ingest.merge.cpu: - description: CPU time in Nanoseconds spent on merging intermediate segments. - unit: ns - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: - [ - ingest_task_type, - ingest_data_source, - ingest_group_id, - ingest_tags, - ingest_task_id, - ] - enabled: true - apachedruid.ingest.handoff.count: - description: Number of handoffs that happened. 
- unit: "{handoffs}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: - [ - ingest_task_type, - ingest_data_source, - ingest_group_id, - ingest_tags, - ingest_task_id, - ] - enabled: true - apachedruid.ingest.sink.count: - description: Number of sinks not handed off. - unit: "{sinks}" - gauge: - value_type: int - attributes: - [ - ingest_task_type, - ingest_data_source, - ingest_group_id, - ingest_tags, - ingest_task_id, - ] - enabled: true - apachedruid.ingest.events.message_gap: - description: Time gap in milliseconds between the latest ingested event timestamp and the current system timestamp of metrics emission. If the value is increasing but lag is low, Druid may not be receiving new data. This metric is reset as new tasks spawn up. - unit: ms - gauge: - value_type: int - attributes: - [ - ingest_task_type, - ingest_data_source, - ingest_group_id, - ingest_tags, - ingest_task_id, - ] - enabled: true - apachedruid.ingest.notices.queue_size: - description: Number of pending notices to be processed by the coordinator. - unit: "{notices}" - gauge: - value_type: int - attributes: [ingest_tags, ingest_data_source] - enabled: true - apachedruid.ingest.notices.time: - description: Milliseconds taken to process a notice by the supervisor. - unit: ms - gauge: - value_type: int - attributes: [ingest_tags, ingest_data_source] - enabled: true - apachedruid.ingest.pause.time: - description: Milliseconds spent by a task in a paused state without ingesting. - unit: ms - gauge: - value_type: int - attributes: [ingest_tags, ingest_task_id, ingest_data_source] - enabled: true - apachedruid.ingest.handoff.time: - description: Total number of milliseconds taken to handoff a set of segments. - unit: ms - gauge: - value_type: int - attributes: - [ - ingest_task_type, - ingest_data_source, - ingest_group_id, - ingest_tags, - ingest_task_id, - ] - enabled: true - apachedruid.task.run.time: - description: Milliseconds taken to run a task. - unit: ms - gauge: - value_type: int - attributes: - [ - task_type, - task_data_source, - task_group_id, - task_status, - task_tags, - task_id, - ] - enabled: true - apachedruid.task.pending.time: - description: Milliseconds taken for a task to wait for running. - unit: ms - gauge: - value_type: int - attributes: [task_type, task_data_source, task_group_id, task_tags, task_id] - enabled: true - apachedruid.task.action.log.time: - description: Milliseconds taken to log a task action to the audit log. - unit: ms - gauge: - value_type: int - attributes: - [ - task_type, - task_data_source, - task_action_type, - task_group_id, - task_tags, - task_id, - ] - enabled: true - apachedruid.task.action.run.time: - description: Milliseconds taken to execute a task action. - unit: ms - gauge: - value_type: int - attributes: - [ - task_type, - task_data_source, - task_action_type, - task_group_id, - task_tags, - task_id, - ] - enabled: true - apachedruid.task.action.success.count: - description: Number of task actions that were executed successfully during the emission period. Currently only being emitted for [batched `segmentAllocate` actions](https,//druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions). - unit: "{actions}" - gauge: - value_type: int - attributes: - [ - task_type, - task_data_source, - task_action_type, - task_group_id, - task_tags, - task_id, - ] - enabled: true - apachedruid.task.action.failed.count: - description: Number of task actions that failed during the emission period. 
Currently only being emitted for [batched `segmentAllocate` actions](https,//druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions). - unit: "{actions}" - gauge: - value_type: int - attributes: - [ - task_type, - task_data_source, - task_action_type, - task_group_id, - task_tags, - task_id, - ] - enabled: true - apachedruid.task.action.batch.queue_time: - description: Milliseconds spent by a batch of task actions in queue. Currently only being emitted for [batched `segmentAllocate` actions](https,//druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions). - unit: ms - gauge: - value_type: int - attributes: [task_interval, task_data_source, task_action_type] - enabled: true - apachedruid.task.action.batch.run_time: - description: Milliseconds taken to execute a batch of task actions. Currently only being emitted for [batched `segmentAllocate` actions](https,//druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions). - unit: ms - gauge: - value_type: int - attributes: [task_interval, task_data_source, task_action_type] - enabled: true - apachedruid.task.action.batch.size: - description: Number of task actions in a batch that was executed during the emission period. Currently only being emitted for [batched `segmentAllocate` actions](https,//druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions). - unit: "{actions}" - gauge: - value_type: int - attributes: [task_interval, task_data_source, task_action_type] - enabled: true - apachedruid.task.action.batch.attempts: - description: Number of execution attempts for a single batch of task actions. Currently only being emitted for [batched `segmentAllocate` actions](https,//druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions). - unit: "{attempts}" - gauge: - value_type: int - attributes: [task_interval, task_data_source, task_action_type] - enabled: true - apachedruid.task.segment_availability.wait.time: - description: The amount of milliseconds a batch indexing task waited for newly created segments to become available for querying. - unit: ms - gauge: - value_type: int - attributes: - [ - task_type, - task_data_source, - task_group_id, - task_segment_availability_confirmed, - task_tags, - task_id, - ] - enabled: true - apachedruid.segment.added.bytes: - description: Size in bytes of new segments created. - unit: By - gauge: - value_type: int - attributes: - [ - segment_task_type, - segment_data_source, - segment_group_id, - segment_tags, - segment_task_id, - segment_interval, - ] - enabled: true - apachedruid.segment.moved.bytes: - description: Size in bytes of segments moved/archived via the Move Task. - unit: By - gauge: - value_type: int - attributes: - [ - segment_task_type, - segment_data_source, - segment_group_id, - segment_tags, - segment_task_id, - segment_interval, - ] - enabled: true - apachedruid.segment.nuked.bytes: - description: Size in bytes of segments deleted via the Kill Task. - unit: By - gauge: - value_type: int - attributes: - [ - segment_task_type, - segment_data_source, - segment_group_id, - segment_tags, - segment_task_id, - segment_interval, - ] - enabled: true - apachedruid.task.success.count: - description: Number of successful tasks per emission period. This metric is only available if the `TaskCountStatsMonitor` module is included. 
- unit: "{tasks}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [task_data_source] - enabled: true - apachedruid.task.failed.count: - description: Number of failed tasks per emission period. This metric is only available if the `TaskCountStatsMonitor` module is included. - unit: "{tasks}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [task_data_source] - enabled: true - apachedruid.task.running.count: - description: Number of current running tasks. This metric is only available if the `TaskCountStatsMonitor` module is included. - unit: "{tasks}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [task_data_source] - enabled: true - apachedruid.task.pending.count: - description: Number of current pending tasks. This metric is only available if the `TaskCountStatsMonitor` module is included. - unit: "{tasks}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [task_data_source] - enabled: true - apachedruid.task.waiting.count: - description: Number of current waiting tasks. This metric is only available if the `TaskCountStatsMonitor` module is included. - unit: "{tasks}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [task_data_source] - enabled: true - apachedruid.task_slot.total.count: - description: Number of total task slots per emission period. This metric is only available if the `TaskSlotCountStatsMonitor` module is included. - unit: "{slots}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [taskSlot_category] - enabled: true - apachedruid.task_slot.idle.count: - description: Number of idle task slots per emission period. This metric is only available if the `TaskSlotCountStatsMonitor` module is included. - unit: "{slots}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [taskSlot_category] - enabled: true - apachedruid.task_slot.used.count: - description: Number of busy task slots per emission period. This metric is only available if the `TaskSlotCountStatsMonitor` module is included. - unit: "{slots}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [taskSlot_category] - enabled: true - apachedruid.task_slot.lazy.count: - description: Number of total task slots in lazy marked Middle Managers and Indexers per emission period. This metric is only available if the `TaskSlotCountStatsMonitor` module is included. - unit: "{slots}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [taskSlot_category] - enabled: true - apachedruid.task_slot.blacklisted.count: - description: Number of total task slots in blacklisted Middle Managers and Indexers per emission period. This metric is only available if the `TaskSlotCountStatsMonitor` module is included. - unit: "{slots}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [taskSlot_category] - enabled: true - apachedruid.worker.task.failed.count: - description: Number of failed tasks run on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included, and is only supported for Middle Manager nodes. 
- unit: "{tasks}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [worker_category, worker_version] - enabled: true - apachedruid.worker.task.success.count: - description: Number of successful tasks run on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included, and is only supported for Middle Manager nodes. - unit: "{tasks}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [worker_category, worker_version] - enabled: true - apachedruid.worker.task_slot.idle.count: - description: Number of idle task slots on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included, and is only supported for Middle Manager nodes. - unit: "{slots}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [worker_category, worker_version] - enabled: true - apachedruid.worker.task_slot.total.count: - description: Number of total task slots on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included. - unit: "{slots}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [worker_category, worker_version] - enabled: true - apachedruid.worker.task_slot.used.count: - description: Number of busy task slots on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included. - unit: "{slots}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [worker_category, worker_version] - enabled: true - apachedruid.ingest.shuffle.bytes: - description: Number of bytes shuffled per emission period. - unit: By - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [ingest_supervisor_task_id] - enabled: true - apachedruid.ingest.shuffle.requests: - description: Number of shuffle requests per emission period. - unit: "{requests}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [ingest_supervisor_task_id] - enabled: true - apachedruid.segment.assigned.count: - description: Number of segments assigned to be loaded in the cluster. - unit: "{segments}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [segment_tier, segment_data_source] - enabled: true - apachedruid.segment.moved.count: - description: Number of segments moved in the cluster. - unit: "{segments}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [segment_tier, segment_data_source] - enabled: true - apachedruid.segment.dropped.count: - description: Number of segments chosen to be dropped from the cluster due to being over-replicated. - unit: "{segments}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [segment_tier, segment_data_source] - enabled: true - apachedruid.segment.deleted.count: - description: Number of segments marked as unused due to drop rules. - unit: "{segments}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [segment_data_source] - enabled: true - apachedruid.segment.unneeded.count: - description: Number of segments dropped due to being marked as unused. 
- unit: "{segments}" - gauge: - value_type: int - attributes: [segment_tier, segment_data_source] - enabled: true - apachedruid.segment.assign_skipped.count: - description: Number of segments that could not be assigned to any server for loading. This can occur due to replication throttling, no available disk space, or a full load queue. - unit: "{segments}" - gauge: - value_type: int - attributes: [segment_description, segment_tier, segment_data_source] - enabled: true - apachedruid.segment.move_skipped.count: - description: Number of segments that were chosen for balancing but could not be moved. This can occur when segments are already optimally placed. - unit: "{segments}" - gauge: - value_type: int - attributes: [segment_description, segment_tier, segment_data_source] - enabled: true - apachedruid.segment.drop_skipped.count: - description: Number of segments that could not be dropped from any server. - unit: "{segments}" - gauge: - value_type: int - attributes: [segment_description, segment_tier, segment_data_source] - enabled: true - apachedruid.segment.load_queue.size: - description: Size in bytes of segments to load. - unit: By - gauge: - value_type: int - attributes: [segment_server] - enabled: true - apachedruid.segment.load_queue.count: - description: Number of segments to load. - unit: "{segments}" - gauge: - value_type: int - attributes: [segment_server] - enabled: true - apachedruid.segment.drop_queue.count: - description: Number of segments to drop. - unit: "{segments}" - gauge: - value_type: int - attributes: [segment_server] - enabled: true - apachedruid.segment.load_queue.assigned: - description: Number of segments assigned for load or drop to the load queue of a server. - unit: "{segments}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [segment_server, segment_data_source] - enabled: true - apachedruid.segment.load_queue.success: - description: Number of segment assignments that completed successfully. - unit: 1 - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [segment_server, segment_data_source] - enabled: true - apachedruid.segment.load_queue.failed: - description: Number of segment assignments that failed to complete. - unit: 1 - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [segment_server, segment_data_source] - enabled: true - apachedruid.segment.load_queue.cancelled: - description: Number of segment assignments that were canceled before completion. - unit: 1 - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [segment_server, segment_data_source] - enabled: true - apachedruid.segment.size: - description: Total size of used segments in a data source. Emitted only for data sources to which at least one used segment belongs. - unit: 1 - gauge: - value_type: int - attributes: [segment_data_source] - enabled: true - apachedruid.segment.count: - description: Number of used segments belonging to a data source. Emitted only for data sources to which at least one used segment belongs. - unit: "{segments}" - gauge: - value_type: int - attributes: [segment_priority, segment_tier, segment_data_source] - enabled: true - apachedruid.segment.over_shadowed.count: - description: Number of segments marked as unused due to being overshadowed. 
- unit: "{segments}" - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.segment.unavailable.count: - description: Number of unique segments left to load until all used segments are available for queries. - unit: "{segments}" - gauge: - value_type: int - attributes: [segment_data_source] - enabled: true - apachedruid.segment.under_replicated.count: - description: Number of segments, including replicas, left to load until all used segments are available for queries. - unit: "{segments}" - gauge: - value_type: int - attributes: [segment_tier, segment_data_source] - enabled: true - apachedruid.tier.historical.count: - description: Number of available historical nodes in each tier. - unit: 1 - gauge: - value_type: int - attributes: [tier] - enabled: true - apachedruid.tier.replication.factor: - description: Configured maximum replication factor in each tier. - unit: 1 - gauge: - value_type: int - attributes: [tier] - enabled: true - apachedruid.tier.required.capacity: - description: Total capacity in bytes required in each tier. - unit: By - gauge: - value_type: int - attributes: [tier] - enabled: true - apachedruid.tier.total.capacity: - description: Total capacity in bytes available in each tier. - unit: By - gauge: - value_type: int - attributes: [tier] - enabled: true - apachedruid.compact.task.count: - description: Number of tasks issued in the auto compaction run. - unit: "{tasks}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [] - enabled: true - apachedruid.compact_task.max_slot.count: - description: Maximum number of task slots available for auto compaction tasks in the auto compaction run. - unit: "{slots}" - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.compact_task.available_slot.count: - description: Number of available task slots that can be used for auto compaction tasks in the auto compaction run. This is the max number of task slots minus any currently running compaction tasks. - unit: "{slots}" - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.kill_task.available_slot.count: - description: Number of available task slots that can be used for auto kill tasks in the auto kill run. This is the max number of task slots minus any currently running auto kill tasks. - unit: "{slots}" - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.kill_task.max_slot.count: - description: Maximum number of task slots available for auto kill tasks in the auto kill run. - unit: "{slots}" - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.kill.task.count: - description: Number of tasks issued in the auto kill run. - unit: "{tasks}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [] - enabled: true - apachedruid.kill.pending_segments.count: - description: Number of stale pending segments deleted from the metadata store. - unit: "{segments}" - gauge: - value_type: int - attributes: [kill_data_source] - enabled: true - apachedruid.segment.wait_compact.bytes: - description: Total bytes of this datasource waiting to be compacted by the auto compaction (only consider intervals/segments that are eligible for auto compaction). 
- unit: By - gauge: - value_type: int - attributes: [segment_data_source] - enabled: true - apachedruid.segment.wait_compact.count: - description: Total number of segments of this datasource waiting to be compacted by the auto compaction (only consider intervals/segments that are eligible for auto compaction). - unit: "{segments}" - gauge: - value_type: int - attributes: [segment_data_source] - enabled: true - apachedruid.interval.wait_compact.count: - description: Total number of intervals of this datasource waiting to be compacted by the auto compaction (only consider intervals/segments that are eligible for auto compaction). - unit: "{intervals}" - gauge: - value_type: int - attributes: [interval_data_source] - enabled: true - apachedruid.segment.compacted.bytes: - description: Total bytes of this datasource that are already compacted with the spec set in the auto compaction config. - unit: By - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [segment_data_source] - enabled: true - apachedruid.segment.compacted.count: - description: Total number of segments of this datasource that are already compacted with the spec set in the auto compaction config. - unit: "{segments}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [segment_data_source] - enabled: true - apachedruid.interval.compacted.count: - description: Total number of intervals of this datasource that are already compacted with the spec set in the auto compaction config. - unit: "{intervals}" - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [interval_data_source] - enabled: true - apachedruid.segment.skip_compact.bytes: - description: Total bytes of this datasource that are skipped (not eligible for auto compaction) by the auto compaction. - unit: By - gauge: - value_type: int - attributes: [segment_data_source] - enabled: true - apachedruid.segment.skip_compact.count: - description: Total number of segments of this datasource that are skipped (not eligible for auto compaction) by the auto compaction. - unit: "{segments}" - gauge: - value_type: int - attributes: [segment_data_source] - enabled: true - apachedruid.interval.skip_compact.count: - description: Total number of intervals of this datasource that are skipped (not eligible for auto compaction) by the auto compaction. - unit: "{intervals}" - gauge: - value_type: int - attributes: [interval_data_source] - enabled: true - apachedruid.coordinator.time: - description: Approximate Coordinator duty runtime in milliseconds. - unit: ms - gauge: - value_type: int - attributes: [coordinator_duty] - enabled: true - apachedruid.coordinator.global.time: - description: Approximate runtime of a full coordination cycle in milliseconds. The `dutyGroup` dimension indicates what type of coordination this run was. For example, Historical Management or Indexing. - unit: ms - gauge: - value_type: int - attributes: [coordinator_duty_group] - enabled: true - apachedruid.metadata.kill.supervisor.count: - description: Total number of terminated supervisors that were automatically deleted from metadata store per each Coordinator kill supervisor duty run. This metric can help adjust `druid.coordinator.kill.supervisor.durationToRetain` configuration based on whether more or less terminated supervisors need to be deleted per cycle. This metric is only emitted when `druid.coordinator.kill.supervisor.on` is set to true. 
- unit: "{supervisors}" - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.metadata.kill.audit.count: - description: Total number of audit logs that were automatically deleted from metadata store per each Coordinator kill audit duty run. This metric can help adjust `druid.coordinator.kill.audit.durationToRetain` configuration based on whether more or less audit logs need to be deleted per cycle. This metric is emitted only when `druid.coordinator.kill.audit.on` is set to true. - unit: 1 - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [] - enabled: true - apachedruid.metadata.kill.compaction.count: - description: Total number of compaction configurations that were automatically deleted from metadata store per each Coordinator kill compaction configuration duty run. This metric is only emitted when `druid.coordinator.kill.compaction.on` is set to true. - unit: 1 - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.metadata.kill.rule.count: - description: Total number of rules that were automatically deleted from metadata store per each Coordinator kill rule duty run. This metric can help adjust `druid.coordinator.kill.rule.durationToRetain` configuration based on whether more or less rules need to be deleted per cycle. This metric is only emitted when `druid.coordinator.kill.rule.on` is set to true. - unit: "{rules}" - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.metadata.kill.datasource.count: - description: Total number of datasource metadata that were automatically deleted from metadata store per each Coordinator kill datasource duty run. Note that datasource metadata only exists for datasource created from supervisor. This metric can help adjust `druid.coordinator.kill.datasource.durationToRetain` configuration based on whether more or less datasource metadata need to be deleted per cycle. This metric is only emitted when `druid.coordinator.kill.datasource.on` is set to true. - unit: 1 - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.segment.max: - description: Maximum byte limit available for segments. - unit: 1 - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.segment.used: - description: Bytes used for served segments. - unit: By - gauge: - value_type: int - attributes: [segment_priority, segment_tier, segment_data_source] - enabled: true - apachedruid.segment.used_percent: - description: Percentage of space used by served segments. - unit: 1.0 - gauge: - value_type: double - attributes: [segment_priority, segment_tier, segment_data_source] - enabled: true - apachedruid.segment.pending_delete: - description: On-disk size in bytes of segments that are waiting to be cleared out. - unit: By - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.segment.row_count.avg: - description: The average number of rows per segment on a historical. `SegmentStatsMonitor` must be enabled. - unit: "{rows}" - gauge: - value_type: int - attributes: [segment_priority, segment_tier, segment_data_source] - enabled: true - apachedruid.segment.row_count.range.count: - description: The number of segments in a bucket. `SegmentStatsMonitor` must be enabled. - unit: "{segments}" - gauge: - value_type: int - attributes: - [segment_priority, segment_tier, segment_data_source, segment_range] - enabled: true - apachedruid.jvm.pool.committed: - description: Committed pool. 
- unit: By - gauge: - value_type: int - attributes: [jvm_pool_name, jvm_pool_kind] - enabled: true - apachedruid.jvm.pool.init: - description: Initial pool. - unit: By - gauge: - value_type: int - attributes: [jvm_pool_name, jvm_pool_kind] - enabled: true - apachedruid.jvm.pool.max: - description: Max pool. - unit: By - gauge: - value_type: int - attributes: [jvm_pool_name, jvm_pool_kind] - enabled: true - apachedruid.jvm.pool.used: - description: Pool used. - unit: By - gauge: - value_type: int - attributes: [jvm_pool_name, jvm_pool_kind] - enabled: true - apachedruid.jvm.bufferpool.count: - description: Bufferpool count. - unit: 1 - gauge: - value_type: int - attributes: [jvm_bufferpool_name] - enabled: true - apachedruid.jvm.bufferpool.used: - description: Bufferpool used. - unit: 1 - gauge: - value_type: int - attributes: [jvm_bufferpool_name] - enabled: true - apachedruid.jvm.bufferpool.capacity: - description: Bufferpool capacity. - unit: 1 - gauge: - value_type: int - attributes: [jvm_bufferpool_name] - enabled: true - apachedruid.jvm.mem.init: - description: Initial memory. - unit: By - gauge: - value_type: int - attributes: [jvm_mem_kind] - enabled: true - apachedruid.jvm.mem.max: - description: Max memory. - unit: By - gauge: - value_type: int - attributes: [jvm_mem_kind] - enabled: true - apachedruid.jvm.mem.used: - description: Used memory. - unit: By - gauge: - value_type: int - attributes: [jvm_mem_kind] - enabled: true - apachedruid.jvm.mem.committed: - description: Committed memory. - unit: By - gauge: - value_type: int - attributes: [jvm_mem_kind] - enabled: true - apachedruid.jvm.gc.count: - description: Garbage collection count. - unit: 1 - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [jvm_gc_gen, jvm_gc_name] - enabled: true - apachedruid.jvm.gc.cpu: - description: Count of CPU time in Nanoseconds spent on garbage collection. Note, `jvm/gc/cpu` represents the total time over multiple GC cycles; divide by `jvm/gc/count` to get the mean GC time per cycle. - unit: ns - sum: - monotonic: true - aggregation_temporality: delta - value_type: int - attributes: [jvm_gc_gen, jvm_gc_name] - enabled: true - apachedruid.zk.connected: - description: Indicator of connection status. `1` for connected, `0` for disconnected. Emitted once per monitor period. - unit: 1 - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.zk.reconnect.time: - description: Amount of time, in milliseconds, that a server was disconnected from ZooKeeper before reconnecting. Emitted on reconnection. Not emitted if connection to ZooKeeper is permanently lost, because in this case, there is no reconnection. - unit: ms - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.ingest.events.buffered: - description: Number of events queued in the `EventReceiverFirehose` buffer. - unit: "{events}" - gauge: - value_type: int - attributes: - [ - ingest_task_type, - ingest_data_source, - ingest_service_name, - ingest_buffer_capacity, - ingest_task_id, - ] - enabled: true - apachedruid.ingest.bytes.received: - description: Number of bytes received by the `EventReceiverFirehose`. - unit: By - gauge: - value_type: int - attributes: - [ - ingest_task_type, - ingest_task_id, - ingest_data_source, - ingest_service_name, - ] - enabled: true - apachedruid.sys.swap.free: - description: Free swap. - unit: By - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.sys.swap.max: - description: Max swap. 
- unit: By - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.sys.swap.page_in: - description: Paged in swap. - unit: 1 - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.sys.swap.page_out: - description: Paged out swap. - unit: 1 - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.sys.disk.write.count: - description: Writes to disk. - unit: 1 - gauge: - value_type: int - attributes: [sys_disk_name] - enabled: true - apachedruid.sys.disk.read.count: - description: Reads from disk. - unit: 1 - gauge: - value_type: int - attributes: [sys_disk_name] - enabled: true - apachedruid.sys.disk.write.size: - description: Bytes written to disk. One indicator of the amount of paging occurring for segments. - unit: By - gauge: - value_type: int - attributes: [sys_disk_name] - enabled: true - apachedruid.sys.disk.read.size: - description: Bytes read from disk. One indicator of the amount of paging occurring for segments. - unit: By - gauge: - value_type: int - attributes: [sys_disk_name] - enabled: true - apachedruid.sys.disk.queue: - description: Disk queue length. Measures number of requests waiting to be processed by disk. - unit: 1 - gauge: - value_type: int - attributes: [sys_disk_name] - enabled: true - apachedruid.sys.disk.transfer_time: - description: Transfer time to read from or write to disk. - unit: ms - gauge: - value_type: int - attributes: [sys_disk_name] - enabled: true - apachedruid.sys.net.write.size: - description: Bytes written to the network. - unit: By - gauge: - value_type: int - attributes: [sys_net_hwaddr, sys_net_name, sys_net_address] - enabled: true - apachedruid.sys.net.read.size: - description: Bytes read from the network. - unit: By - gauge: - value_type: int - attributes: [sys_net_hwaddr, sys_net_name, sys_net_address] - enabled: true - apachedruid.sys.net.read.packets: - description: Total packets read from the network. - unit: 1 - gauge: - value_type: int - attributes: [sys_net_hwaddr, sys_net_name, sys_net_address] - enabled: true - apachedruid.sys.net.write.packets: - description: Total packets written to the network. - unit: 1 - gauge: - value_type: int - attributes: [sys_net_hwaddr, sys_net_name, sys_net_address] - enabled: true - apachedruid.sys.net.read.errors: - description: Total network read errors. - unit: 1 - gauge: - value_type: int - attributes: [sys_net_hwaddr, sys_net_name, sys_net_address] - enabled: true - apachedruid.sys.net.write.errors: - description: Total network write errors. - unit: 1 - gauge: - value_type: int - attributes: [sys_net_hwaddr, sys_net_name, sys_net_address] - enabled: true - apachedruid.sys.net.read.dropped: - description: Total packets dropped coming from network. - unit: 1 - gauge: - value_type: int - attributes: [sys_net_hwaddr, sys_net_name, sys_net_address] - enabled: true - apachedruid.sys.net.write.collisions: - description: Total network write collisions. - unit: 1 - gauge: - value_type: int - attributes: [sys_net_hwaddr, sys_net_name, sys_net_address] - enabled: true - apachedruid.sys.fs.used: - description: Filesystem bytes used. - unit: By - gauge: - value_type: int - attributes: [sys_fs_dir_name, sys_fs_dev_name] - enabled: true - apachedruid.sys.fs.max: - description: Filesystem bytes max. - unit: By - gauge: - value_type: int - attributes: [sys_fs_dir_name, sys_fs_dev_name] - enabled: true - apachedruid.sys.fs.files.count: - description: Filesystem total IO nodes. 
- unit: 1 - gauge: - value_type: int - attributes: [sys_fs_dir_name, sys_fs_dev_name] - enabled: true - apachedruid.sys.fs.files.free: - description: Filesystem free IO nodes. - unit: 1 - gauge: - value_type: int - attributes: [sys_fs_dir_name, sys_fs_dev_name] - enabled: true - apachedruid.sys.mem.used: - description: Memory used. - unit: By - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.sys.mem.max: - description: Memory max. - unit: By - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.sys.mem.free: - description: Memory free. - unit: By - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.sys.storage.used: - description: Disk space used. - unit: 1 - gauge: - value_type: int - attributes: [sys_fs_dir_name] - enabled: true - apachedruid.sys.cpu: - description: CPU used. - unit: 1 - gauge: - value_type: int - attributes: [sys_cpu_time, sys_cpu_name] - enabled: true - apachedruid.sys.uptime: - description: Total system uptime. - unit: s - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.sys.la.1: - description: System CPU load averages over past `i` minutes, where `i={1,5,15}`. - unit: 1 - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.sys.la.5: - description: System CPU load averages over past `i` minutes, where `i={1,5,15}`. - unit: 1 - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.sys.la.15: - description: System CPU load averages over past `i` minutes, where `i={1,5,15}`. - unit: 1 - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.sys.tcpv4.active_opens: - description: Total TCP active open connections. - unit: 1 - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.sys.tcpv4.passive_opens: - description: Total TCP passive open connections. - unit: 1 - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.sys.tcpv4.attempt_fails: - description: Total TCP active connection failures. - unit: 1 - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.sys.tcpv4.estab_resets: - description: Total TCP connection resets. - unit: 1 - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.sys.tcpv4.in.segs: - description: Total segments received in connection. - unit: 1 - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.sys.tcpv4.in.errs: - description: Errors while reading segments. - unit: 1 - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.sys.tcpv4.out.segs: - description: Total segments sent. - unit: 1 - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.sys.tcpv4.out.rsts: - description: Total `out reset` packets sent to reset the connection. - unit: 1 - gauge: - value_type: int - attributes: [] - enabled: true - apachedruid.sys.tcpv4.retrans.segs: - description: Total segments re-transmitted. 
- unit: 1 - gauge: - value_type: int - attributes: [] - enabled: true diff --git a/receiver/apachedruidreceiver/receiver_test.go b/receiver/apachedruidreceiver/receiver_test.go index dbbc980df3644..bd17836383175 100644 --- a/receiver/apachedruidreceiver/receiver_test.go +++ b/receiver/apachedruidreceiver/receiver_test.go @@ -20,7 +20,7 @@ import ( func TestWriteLineProtocol_v2API(t *testing.T) { addr := testutil.GetAvailableLocalAddress(t) config := &Config{ - HTTPServerSettings: confighttp.HTTPServerSettings{ + HTTPServerSettings: confighttp.ServerConfig{ Endpoint: addr, }, } From d9b0d0cd8f54404834716845d3e2e735e912a26a Mon Sep 17 00:00:00 2001 From: Yuanli Han Date: Mon, 25 Mar 2024 16:28:04 +0800 Subject: [PATCH 7/8] remove unwanted files in the first PR --- cmd/otelcontribcol/builder-config.yaml | 2 - cmd/otelcontribcol/components.go | 2 - cmd/otelcontribcol/go.mod | 3 - receiver/apachedruidreceiver/documentation.md | 3217 ----------------- 4 files changed, 3224 deletions(-) delete mode 100644 receiver/apachedruidreceiver/documentation.md diff --git a/cmd/otelcontribcol/builder-config.yaml b/cmd/otelcontribcol/builder-config.yaml index c24d6009fad4a..430c1dc39ae76 100644 --- a/cmd/otelcontribcol/builder-config.yaml +++ b/cmd/otelcontribcol/builder-config.yaml @@ -119,7 +119,6 @@ receivers: - gomod: go.opentelemetry.io/collector/receiver/otlpreceiver v0.96.1-0.20240315172937-3b5aee0c7a16 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/activedirectorydsreceiver v0.96.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/aerospikereceiver v0.96.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachedruidreceiver v0.96.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachereceiver v0.96.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachesparkreceiver v0.96.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscloudwatchreceiver v0.96.0 @@ -266,7 +265,6 @@ replaces: - github.com/open-telemetry/opentelemetry-collector-contrib/extension/headerssetterextension => ../../extension/headerssetterextension - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/googlemanagedprometheusexporter => ../../exporter/googlemanagedprometheusexporter - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/aerospikereceiver => ../../receiver/aerospikereceiver - - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachedruidreceiver => ../../receiver/apachedruidreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor => ../../processor/cumulativetodeltaprocessor - github.com/open-telemetry/opentelemetry-collector-contrib/processor/intervalprocessor => ../../processor/intervalprocessor - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sapmreceiver => ../../receiver/sapmreceiver diff --git a/cmd/otelcontribcol/components.go b/cmd/otelcontribcol/components.go index f9f2231453204..b4825c85addee 100644 --- a/cmd/otelcontribcol/components.go +++ b/cmd/otelcontribcol/components.go @@ -119,7 +119,6 @@ import ( transformprocessor "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor" activedirectorydsreceiver "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/activedirectorydsreceiver" aerospikereceiver "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/aerospikereceiver" 
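The metric definitions above follow the mdatagen metadata schema: each entry carries a description, a unit, a gauge or monotonic delta sum type, its attribute list, and an `enabled` flag. In a running collector, each of these metrics can be toggled individually under the receiver's configuration. A minimal sketch, assuming the receiver exposes the standard generated `metrics` override block and an `endpoint` field from its embedded HTTP server settings; the address and the chosen metric are illustrative, not taken from this patch:

```yaml
receivers:
  apachedruid:
    # Listen address for the receiver's embedded HTTP server; example value only.
    endpoint: 0.0.0.0:9000
    metrics:
      # Any metric declared in metadata.yaml can be disabled the same way.
      apachedruid.sys.swap.free:
        enabled: false
```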
- apachedruidreceiver "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachedruidreceiver" apachereceiver "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachereceiver" apachesparkreceiver "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachesparkreceiver" awscloudwatchreceiver "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscloudwatchreceiver" @@ -250,7 +249,6 @@ func components() (otelcol.Factories, error) { otlpreceiver.NewFactory(), activedirectorydsreceiver.NewFactory(), aerospikereceiver.NewFactory(), - apachedruidreceiver.NewFactory(), apachereceiver.NewFactory(), apachesparkreceiver.NewFactory(), awscloudwatchreceiver.NewFactory(), diff --git a/cmd/otelcontribcol/go.mod b/cmd/otelcontribcol/go.mod index f2be4106805e3..133d8c980b5f0 100644 --- a/cmd/otelcontribcol/go.mod +++ b/cmd/otelcontribcol/go.mod @@ -107,7 +107,6 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.96.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/activedirectorydsreceiver v0.96.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/aerospikereceiver v0.96.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachedruidreceiver v0.96.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachereceiver v0.96.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachesparkreceiver v0.96.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscloudwatchreceiver v0.96.0 @@ -838,8 +837,6 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/googl replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/aerospikereceiver => ../../receiver/aerospikereceiver -replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachedruidreceiver => ../../receiver/apachedruidreceiver - replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor => ../../processor/cumulativetodeltaprocessor replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/intervalprocessor => ../../processor/intervalprocessor diff --git a/receiver/apachedruidreceiver/documentation.md b/receiver/apachedruidreceiver/documentation.md deleted file mode 100644 index 56bd3de5e0c76..0000000000000 --- a/receiver/apachedruidreceiver/documentation.md +++ /dev/null @@ -1,3217 +0,0 @@ -[comment]: <> (Code generated by mdatagen. DO NOT EDIT.) - -# apachedruid - -## Default Metrics - -The following metrics are emitted by default. Each of them can be disabled by applying the following configuration: - -```yaml -metrics: - : - enabled: false -``` - -### apachedruid.compact.segment_analyzer.fetch_and_process_millis - -Time taken to fetch and process segments to infer the schema for the compaction task to run. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The type of task. | Any Str | -| data_source | The data source of compaction task. | Any Str | -| group_id | The group id of compaction task. | Any Str | -| tags | The tags of the compaction task. | Any Str | -| task_id | The task id of compaction task. | Any Str | - -### apachedruid.compact.task.count - -Number of tasks issued in the auto compaction run. 
- -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {tasks} | Sum | Int | Delta | true | - -### apachedruid.compact_task.available_slot.count - -Number of available task slots that can be used for auto compaction tasks in the auto compaction run. This is the max number of task slots minus any currently running compaction tasks. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {slots} | Gauge | Int | - -### apachedruid.compact_task.max_slot.count - -Maximum number of task slots available for auto compaction tasks in the auto compaction run. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {slots} | Gauge | Int | - -### apachedruid.coordinator.global.time - -Approximate runtime of a full coordination cycle in milliseconds. The `dutyGroup` dimension indicates what type of coordination this run was. For example, Historical Management or Indexing. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| ms | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| duty_group | The name of the duty group. | Any Str | - -### apachedruid.coordinator.time - -Approximate Coordinator duty runtime in milliseconds. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| ms | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| duty | The name of coordinator duty task. | Any Str | - -### apachedruid.ingest.bytes.received - -Number of bytes received by the `EventReceiverFirehose`. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The type of ingestion task. | Any Str | -| task_id | The id of the task. | Any Str | -| data_source | The data source of ingestion task. | Any Str | -| service_name | The name of ingestion service. | Any Str | - -### apachedruid.ingest.count - -Count of `1` every time an ingestion job runs (includes compaction jobs). Aggregate using dimensions. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The type of ingestion task. | Any Str | -| data_source | The data source of ingestion task. | Any Str | -| group_id | The ingestion group id. | Any Str | -| tags | The names of tags. | Any Str | -| task_id | The id of the task. | Any Str | -| task_ingestion_mode | The mode of ingestion task. | Any Str | - -### apachedruid.ingest.events.buffered - -Number of events queued in the `EventReceiverFirehose` buffer. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {events} | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The type of ingestion task. | Any Str | -| data_source | The data source of ingestion task. | Any Str | -| service_name | The name of ingestion service. | Any Str | -| buffer_capacity | The capacity of ingestion buffer. | Any Str | -| task_id | The id of the task. | Any Str | - -### apachedruid.ingest.events.duplicate - -Number of events rejected because the events are duplicated. 
- -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {events} | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The type of ingestion task. | Any Str | -| data_source | The data source of ingestion task. | Any Str | -| group_id | The ingestion group id. | Any Str | -| tags | The names of tags. | Any Str | -| task_id | The id of the task. | Any Str | - -### apachedruid.ingest.events.message_gap - -Time gap in milliseconds between the latest ingested event timestamp and the current system timestamp of metrics emission. If the value is increasing but lag is low, Druid may not be receiving new data. This metric is reset as new tasks spawn up. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| ms | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The type of ingestion task. | Any Str | -| data_source | The data source of ingestion task. | Any Str | -| group_id | The ingestion group id. | Any Str | -| tags | The names of tags. | Any Str | -| task_id | The id of the task. | Any Str | - -### apachedruid.ingest.events.processed - -Number of events processed per emission period. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {events} | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The type of ingestion task. | Any Str | -| data_source | The data source of ingestion task. | Any Str | -| group_id | The ingestion group id. | Any Str | -| tags | The names of tags. | Any Str | -| task_id | The id of the task. | Any Str | - -### apachedruid.ingest.events.processed_with_error - -Number of events processed with some partial errors per emission period. Events processed with partial errors are counted towards both this metric and `ingest/events/processed`. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {events} | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The type of ingestion task. | Any Str | -| data_source | The data source of ingestion task. | Any Str | -| group_id | The ingestion group id. | Any Str | -| tags | The names of tags. | Any Str | -| task_id | The id of the task. | Any Str | - -### apachedruid.ingest.events.thrown_away - -Number of events rejected because they are null, or filtered by `transformSpec`, or outside one of `lateMessageRejectionPeriod`, `earlyMessageRejectionPeriod`, or `windowPeriod`. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {events} | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The type of ingestion task. | Any Str | -| data_source | The data source of ingestion task. | Any Str | -| group_id | The ingestion group id. | Any Str | -| tags | The names of tags. | Any Str | -| task_id | The id of the task. | Any Str | - -### apachedruid.ingest.events.unparseable - -Number of events rejected because the events are unparseable. 
- -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {events} | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The type of ingestion task. | Any Str | -| data_source | The data source of ingestion task. | Any Str | -| group_id | The ingestion group id. | Any Str | -| tags | The names of tags. | Any Str | -| task_id | The id of the task. | Any Str | - -### apachedruid.ingest.handoff.count - -Number of handoffs that happened. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {handoffs} | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The type of ingestion task. | Any Str | -| data_source | The data source of ingestion task. | Any Str | -| group_id | The ingestion group id. | Any Str | -| tags | The names of tags. | Any Str | -| task_id | The id of the task. | Any Str | - -### apachedruid.ingest.handoff.failed - -Number of handoffs that failed. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {handoffs} | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The type of ingestion task. | Any Str | -| data_source | The data source of ingestion task. | Any Str | -| group_id | The ingestion group id. | Any Str | -| tags | The names of tags. | Any Str | -| task_id | The id of the task. | Any Str | - -### apachedruid.ingest.handoff.time - -Total number of milliseconds taken to handoff a set of segments. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| ms | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The type of ingestion task. | Any Str | -| data_source | The data source of ingestion task. | Any Str | -| group_id | The ingestion group id. | Any Str | -| tags | The names of tags. | Any Str | -| task_id | The id of the task. | Any Str | - -### apachedruid.ingest.input.bytes - -Number of bytes read from input sources, after decompression but prior to parsing. This covers all data read, including data that does not end up being fully processed and ingested. For example, this includes data that ends up being rejected for being unparseable or filtered out. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| By | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The type of ingestion task. | Any Str | -| data_source | The data source of ingestion task. | Any Str | -| group_id | The ingestion group id. | Any Str | -| tags | The names of tags. | Any Str | -| task_id | The id of the task. | Any Str | - -### apachedruid.ingest.kafka.avg_lag - -Average lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers across all partitions. Minimum emission period for this metric is a minute. 
- -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| tags | The names of tags. | Any Str | -| stream | The name of stream to ingest. | Any Str | -| data_source | The data source of ingestion task. | Any Str | - -### apachedruid.ingest.kafka.lag - -Total lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers across all partitions. Minimum emission period for this metric is a minute. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| tags | The names of tags. | Any Str | -| stream | The name of stream to ingest. | Any Str | -| data_source | The data source of ingestion task. | Any Str | - -### apachedruid.ingest.kafka.max_lag - -Max lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers across all partitions. Minimum emission period for this metric is a minute. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| tags | The names of tags. | Any Str | -| stream | The name of stream to ingest. | Any Str | -| data_source | The data source of ingestion task. | Any Str | - -### apachedruid.ingest.kafka.partition_lag - -Partition-wise lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers. Minimum emission period for this metric is a minute. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| tags | The names of tags. | Any Str | -| partition | The partition of the topic. | Any Str | -| stream | The name of stream to ingest. | Any Str | -| data_source | The data source of ingestion task. | Any Str | - -### apachedruid.ingest.kinesis.avg_lag.time - -Average lag time in milliseconds between the current message sequence number consumed by the Kinesis indexing tasks and latest sequence number in Kinesis across all shards. Minimum emission period for this metric is a minute. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| ms | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| tags | The names of tags. | Any Str | -| stream | The name of stream to ingest. | Any Str | -| data_source | The data source of ingestion task. | Any Str | - -### apachedruid.ingest.kinesis.lag.time - -Total lag time in milliseconds between the current message sequence number consumed by the Kinesis indexing tasks and latest sequence number in Kinesis across all shards. Minimum emission period for this metric is a minute. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| ms | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| tags | The names of tags. | Any Str | -| stream | The name of stream to ingest. | Any Str | -| data_source | The data source of ingestion task. | Any Str | - -### apachedruid.ingest.kinesis.max_lag.time - -Max lag time in milliseconds between the current message sequence number consumed by the Kinesis indexing tasks and latest sequence number in Kinesis across all shards. 
Minimum emission period for this metric is a minute. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| ms | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| tags | The names of tags. | Any Str | -| stream | The name of stream to ingest. | Any Str | -| data_source | The data source of ingestion task. | Any Str | - -### apachedruid.ingest.kinesis.partition_lag.time - -Partition-wise lag time in milliseconds between the current message sequence number consumed by the Kinesis indexing tasks and latest sequence number in Kinesis. Minimum emission period for this metric is a minute. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| ms | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| tags | The names of tags. | Any Str | -| partition | The partition of the topic. | Any Str | -| stream | The name of stream to ingest. | Any Str | -| data_source | The data source of ingestion task. | Any Str | - -### apachedruid.ingest.merge.cpu - -CPU time in Nanoseconds spent on merging intermediate segments. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| ns | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The type of ingestion task. | Any Str | -| data_source | The data source of ingestion task. | Any Str | -| group_id | The ingestion group id. | Any Str | -| tags | The names of tags. | Any Str | -| task_id | The id of the task. | Any Str | - -### apachedruid.ingest.merge.time - -Milliseconds spent merging intermediate segments. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| ms | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The type of ingestion task. | Any Str | -| data_source | The data source of ingestion task. | Any Str | -| group_id | The ingestion group id. | Any Str | -| tags | The names of tags. | Any Str | -| task_id | The id of the task. | Any Str | - -### apachedruid.ingest.notices.queue_size - -Number of pending notices to be processed by the coordinator. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {notices} | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| tags | The names of tags. | Any Str | -| data_source | The data source of ingestion task. | Any Str | - -### apachedruid.ingest.notices.time - -Milliseconds taken to process a notice by the supervisor. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| ms | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| tags | The names of tags. | Any Str | -| data_source | The data source of ingestion task. | Any Str | - -### apachedruid.ingest.pause.time - -Milliseconds spent by a task in a paused state without ingesting. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| ms | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| tags | The names of tags. | Any Str | -| task_id | The id of the task. | Any Str | -| data_source | The data source of ingestion task. 
| Any Str | - -### apachedruid.ingest.persists.back_pressure - -Milliseconds spent creating persist tasks and blocking waiting for them to finish. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| ms | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The type of ingestion task. | Any Str | -| data_source | The data source of ingestion task. | Any Str | -| group_id | The ingestion group id. | Any Str | -| tags | The names of tags. | Any Str | -| task_id | The id of the task. | Any Str | - -### apachedruid.ingest.persists.count - -Number of times persist occurred. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| 1 | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The type of ingestion task. | Any Str | -| data_source | The data source of ingestion task. | Any Str | -| group_id | The ingestion group id. | Any Str | -| tags | The names of tags. | Any Str | -| task_id | The id of the task. | Any Str | - -### apachedruid.ingest.persists.cpu - -CPU time in nanoseconds spent on doing intermediate persist. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| ns | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The type of ingestion task. | Any Str | -| data_source | The data source of ingestion task. | Any Str | -| group_id | The ingestion group id. | Any Str | -| tags | The names of tags. | Any Str | -| task_id | The id of the task. | Any Str | - -### apachedruid.ingest.persists.failed - -Number of persists that failed. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {persists} | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The type of ingestion task. | Any Str | -| data_source | The data source of ingestion task. | Any Str | -| group_id | The ingestion group id. | Any Str | -| tags | The names of tags. | Any Str | -| task_id | The id of the task. | Any Str | - -### apachedruid.ingest.persists.time - -Milliseconds spent doing intermediate persist. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| ms | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The type of ingestion task. | Any Str | -| data_source | The data source of ingestion task. | Any Str | -| group_id | The ingestion group id. | Any Str | -| tags | The names of tags. | Any Str | -| task_id | The id of the task. | Any Str | - -### apachedruid.ingest.rows.output - -Number of Druid rows persisted. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {rows} | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The type of ingestion task. 
| Any Str | -| task_id | The id of the task. | Any Str | -| data_source | The data source of ingestion task. | Any Str | -| group_id | The ingestion group id. | Any Str | - -### apachedruid.ingest.segments.count - -Count of final segments created by job (includes tombstones). - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The type of ingestion task. | Any Str | -| data_source | The data source of ingestion task. | Any Str | -| group_id | The ingestion group id. | Any Str | -| tags | The names of tags. | Any Str | -| task_id | The id of the task. | Any Str | -| task_ingestion_mode | The mode of ingestion task. | Any Str | - -### apachedruid.ingest.shuffle.bytes - -Number of bytes shuffled per emission period. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| By | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| supervisor_task_id | The task id of supervisor. | Any Str | - -### apachedruid.ingest.shuffle.requests - -Number of shuffle requests per emission period. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {requests} | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| supervisor_task_id | The task id of supervisor. | Any Str | - -### apachedruid.ingest.sink.count - -Number of sinks not handed off. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {sinks} | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The type of ingestion task. | Any Str | -| data_source | The data source of ingestion task. | Any Str | -| group_id | The ingestion group id. | Any Str | -| tags | The names of tags. | Any Str | -| task_id | The id of the task. | Any Str | - -### apachedruid.ingest.tombstones.count - -Count of tombstones created by job. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The type of ingestion task. | Any Str | -| data_source | The data source of ingestion task. | Any Str | -| group_id | The ingestion group id. | Any Str | -| tags | The names of tags. | Any Str | -| task_id | The id of the task. | Any Str | -| task_ingestion_mode | The mode of ingestion task. | Any Str | - -### apachedruid.interval.compacted.count - -Total number of intervals of this datasource that are already compacted with the spec set in the auto compaction config. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {intervals} | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| data_source | The interval of data source. | Any Str | - -### apachedruid.interval.skip_compact.count - -Total number of intervals of this datasource that are skipped (not eligible for auto compaction) by the auto compaction. 
- -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {intervals} | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| data_source | The interval of data source. | Any Str | - -### apachedruid.interval.wait_compact.count - -Total number of intervals of this datasource waiting to be compacted by the auto compaction (only consider intervals/segments that are eligible for auto compaction). - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {intervals} | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| data_source | The interval of data source. | Any Str | - -### apachedruid.jetty.num_open_connections - -Number of open jetty connections. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {connections} | Gauge | Int | - -### apachedruid.jetty.thread_pool.busy - -Number of busy threads that has work to do from the worker queue. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {threads} | Gauge | Int | - -### apachedruid.jetty.thread_pool.idle - -Number of idle threads. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {threads} | Gauge | Int | - -### apachedruid.jetty.thread_pool.is_low_on_threads - -A rough indicator of whether number of total workable threads allocated is enough to handle the works in the work queue. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {threads} | Gauge | Int | - -### apachedruid.jetty.thread_pool.max - -Number of maximum threads allocatable. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {threads} | Gauge | Int | - -### apachedruid.jetty.thread_pool.min - -Number of minimum threads allocatable. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {threads} | Gauge | Int | - -### apachedruid.jetty.thread_pool.queue_size - -Size of the worker queue. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -### apachedruid.jetty.thread_pool.total - -Number of total workable threads allocated. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {threads} | Gauge | Int | - -### apachedruid.jvm.bufferpool.capacity - -Bufferpool capacity. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| bufferpool_name | The name of buffer pool. | Any Str | - -### apachedruid.jvm.bufferpool.count - -Bufferpool count. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| bufferpool_name | The name of buffer pool. | Any Str | - -### apachedruid.jvm.bufferpool.used - -Bufferpool used. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| bufferpool_name | The name of buffer pool. | Any Str | - -### apachedruid.jvm.gc.count - -Garbage collection count. 
- -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| 1 | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| gc_gen | The name of GC generation. | Any Str | -| gc_name | The gc name of jvm. | Any Str | - -### apachedruid.jvm.gc.cpu - -Count of CPU time in Nanoseconds spent on garbage collection. Note, `jvm/gc/cpu` represents the total time over multiple GC cycles; divide by `jvm/gc/count` to get the mean GC time per cycle. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| ns | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| gc_gen | The name of GC generation. | Any Str | -| gc_name | The gc name of jvm. | Any Str | - -### apachedruid.jvm.mem.committed - -Committed memory. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| mem_kind | The memory kind of jvm. | Any Str | - -### apachedruid.jvm.mem.init - -Initial memory. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| mem_kind | The memory kind of jvm. | Any Str | - -### apachedruid.jvm.mem.max - -Max memory. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| mem_kind | The memory kind of jvm. | Any Str | - -### apachedruid.jvm.mem.used - -Used memory. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| mem_kind | The memory kind of jvm. | Any Str | - -### apachedruid.jvm.pool.committed - -Committed pool. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| pool_name | The name of the pool. | Any Str | -| pool_kind | The pool kind of jvm. | Any Str | - -### apachedruid.jvm.pool.init - -Initial pool. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| pool_name | The name of the pool. | Any Str | -| pool_kind | The pool kind of jvm. | Any Str | - -### apachedruid.jvm.pool.max - -Max pool. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| pool_name | The name of the pool. | Any Str | -| pool_kind | The pool kind of jvm. | Any Str | - -### apachedruid.jvm.pool.used - -Pool used. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| pool_name | The name of the pool. | Any Str | -| pool_kind | The pool kind of jvm. | Any Str | - -### apachedruid.kill.pending_segments.count - -Number of stale pending segments deleted from the metadata store. 
- -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {segments} | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| data_source | The data source name of the kill task. | Any Str | - -### apachedruid.kill.task.count - -Number of tasks issued in the auto kill run. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {tasks} | Sum | Int | Delta | true | - -### apachedruid.kill_task.available_slot.count - -Number of available task slots that can be used for auto kill tasks in the auto kill run. This is the max number of task slots minus any currently running auto kill tasks. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {slots} | Gauge | Int | - -### apachedruid.kill_task.max_slot.count - -Maximum number of task slots available for auto kill tasks in the auto kill run. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {slots} | Gauge | Int | - -### apachedruid.merge_buffer.pending_requests - -Number of requests waiting to acquire a batch of buffers from the merge buffer pool. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {requests} | Gauge | Int | - -### apachedruid.metadata.kill.audit.count - -Total number of audit logs that were automatically deleted from metadata store per each Coordinator kill audit duty run. This metric can help adjust `druid.coordinator.kill.audit.durationToRetain` configuration based on whether more or less audit logs need to be deleted per cycle. This metric is emitted only when `druid.coordinator.kill.audit.on` is set to true. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| 1 | Sum | Int | Delta | true | - -### apachedruid.metadata.kill.compaction.count - -Total number of compaction configurations that were automatically deleted from metadata store per each Coordinator kill compaction configuration duty run. This metric is only emitted when `druid.coordinator.kill.compaction.on` is set to true. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -### apachedruid.metadata.kill.datasource.count - -Total number of datasource metadata that were automatically deleted from metadata store per each Coordinator kill datasource duty run. Note that datasource metadata only exists for datasource created from supervisor. This metric can help adjust `druid.coordinator.kill.datasource.durationToRetain` configuration based on whether more or less datasource metadata need to be deleted per cycle. This metric is only emitted when `druid.coordinator.kill.datasource.on` is set to true. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -### apachedruid.metadata.kill.rule.count - -Total number of rules that were automatically deleted from metadata store per each Coordinator kill rule duty run. This metric can help adjust `druid.coordinator.kill.rule.durationToRetain` configuration based on whether more or less rules need to be deleted per cycle. This metric is only emitted when `druid.coordinator.kill.rule.on` is set to true. 
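The following is a minimal, illustrative `runtime.properties` sketch of the Coordinator settings named in this description; the values are examples only, not recommendations.

```properties
# Illustrative example only: enable automated cleanup of unused rules so the
# Coordinator emits this metric on each kill rule duty run.
druid.coordinator.kill.rule.on=true
# Example retention period (ISO-8601 duration) before rules become eligible for deletion.
druid.coordinator.kill.rule.durationToRetain=P90D
```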
- -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {rules} | Gauge | Int | - -### apachedruid.metadata.kill.supervisor.count - -Total number of terminated supervisors that were automatically deleted from metadata store per each Coordinator kill supervisor duty run. This metric can help adjust `druid.coordinator.kill.supervisor.durationToRetain` configuration based on whether more or less terminated supervisors need to be deleted per cycle. This metric is only emitted when `druid.coordinator.kill.supervisor.on` is set to true. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {supervisors} | Gauge | Int | - -### apachedruid.metadatacache.init.time - -Time taken to initialize the broker segment metadata cache. Useful to detect if brokers are taking too long to start. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| ms | Gauge | Int | - -### apachedruid.metadatacache.refresh.count - -Number of segments to refresh in broker segment metadata cache. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -### apachedruid.metadatacache.refresh.time - -Time taken to refresh segments in broker segment metadata cache. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| ms | Gauge | Int | - -### apachedruid.query.byte_limit.exceeded.count - -Number of queries whose inlined subquery results exceeded the given byte limit. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {queries} | Sum | Int | Delta | true | - -### apachedruid.query.bytes - -The total number of bytes returned to the requesting client in the query response from the broker. Other services report the total bytes for their portion of the query. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| data_source | The data source name of the query. | Any Str | -| num_metrics | The number of metrics of the query. | Any Str | -| dimension | The dimension of the query. | Any Str | -| has_filters | Whether query has filters. | Any Str | -| threshold | The threshold of query. | Any Int | -| num_complex_metrics | The number of complex metrics. | Any Int | -| type | The type of query. | Any Str | -| remote_address | The remote address of the query. | Any Str | -| id | The id of query. | Any Str | -| context | The context of the query. | Any Str | -| num_dimensions | The number of dimensions of query. | Any Str | -| interval | The interval of the query. | Any Str | -| duration | The duration of query. | Any Str | - -### apachedruid.query.cache.delta.average_bytes - -Average cache entry byte size. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| By | Sum | Int | Delta | true | - -### apachedruid.query.cache.delta.errors - -Number of cache errors. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {errors} | Sum | Int | Delta | true | - -### apachedruid.query.cache.delta.evictions - -Number of cache evictions. 
- -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {evictions} | Sum | Int | Delta | true | - -### apachedruid.query.cache.delta.hit_rate - -Cache hit rate. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| 1 | Sum | Double | Delta | true | - -### apachedruid.query.cache.delta.hits - -Number of cache hits. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {hits} | Sum | Int | Delta | true | - -### apachedruid.query.cache.delta.misses - -Number of cache misses. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {misses} | Sum | Int | Delta | true | - -### apachedruid.query.cache.delta.num_entries - -Number of cache entries. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {entries} | Sum | Int | Delta | true | - -### apachedruid.query.cache.delta.put.error - -Number of new cache entries that could not be cached due to errors. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {errors} | Sum | Int | Delta | true | - -### apachedruid.query.cache.delta.put.ok - -Number of new cache entries successfully cached. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| 1 | Sum | Int | Delta | true | - -### apachedruid.query.cache.delta.put.oversized - -Number of potential new cache entries that were skipped due to being too large (based on `druid.{broker,historical,realtime}.cache.maxEntrySize` properties). - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| 1 | Sum | Int | Delta | true | - -### apachedruid.query.cache.delta.size_bytes - -Size in bytes of cache entries. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| By | Sum | Int | Delta | true | - -### apachedruid.query.cache.delta.timeouts - -Number of cache timeouts. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {timeouts} | Sum | Int | Delta | true | - -### apachedruid.query.cache.memcached.delta - -Cache metrics unique to memcached (only if `druid.cache.type=memcached`) as their delta from the prior event emission. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| 1 | Sum | Int | Delta | true | - -### apachedruid.query.cache.memcached.total - -Cache metrics unique to memcached (only if `druid.cache.type=memcached`) as their actual values. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -### apachedruid.query.cache.total.average_bytes - -Average cache entry byte size. 
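For context on the cache metrics in this group, here is a minimal, illustrative sketch of the cache properties referenced above (shown for the Historical service; values are examples only).

```properties
# Illustrative example only: cache settings referenced by the cache metrics above.
# The memcached-specific metrics are emitted only when this cache type is used.
druid.cache.type=memcached
# Entries larger than this are skipped and counted by the *.put.oversized metrics.
druid.historical.cache.maxEntrySize=1000000
```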
- -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -### apachedruid.query.cache.total.errors - -Number of cache errors. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {errors} | Gauge | Int | - -### apachedruid.query.cache.total.evictions - -Number of cache evictions. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {evictions} | Gauge | Int | - -### apachedruid.query.cache.total.hit_rate - -Cache hit rate. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Double | - -### apachedruid.query.cache.total.hits - -Number of cache hits. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {hits} | Gauge | Int | - -### apachedruid.query.cache.total.misses - -Number of cache misses. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {misses} | Gauge | Int | - -### apachedruid.query.cache.total.num_entries - -Number of cache entries. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {entries} | Gauge | Int | - -### apachedruid.query.cache.total.put.error - -Number of new cache entries that could not be cached due to errors. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {errors} | Gauge | Int | - -### apachedruid.query.cache.total.put.ok - -Number of new cache entries successfully cached. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -### apachedruid.query.cache.total.put.oversized - -Number of potential new cache entries that were skipped due to being too large (based on `druid.{broker,historical,realtime}.cache.maxEntrySize` properties). - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -### apachedruid.query.cache.total.size_bytes - -Size in bytes of cache entries. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -### apachedruid.query.cache.total.timeouts - -Number of cache timeouts. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {timeouts} | Gauge | Int | - -### apachedruid.query.count - -Number of total queries. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {queries} | Sum | Int | Delta | true | - -### apachedruid.query.cpu.time - -Microseconds of CPU time taken to complete a query. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| ms | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| data_source | The data source name of the query. | Any Str | -| num_metrics | The number of metrics of the query. | Any Str | -| dimension | The dimension of the query. | Any Str | -| has_filters | Whether query has filters. | Any Str | -| threshold | The threshold of query. | Any Int | -| num_complex_metrics | The number of complex metrics. | Any Int | -| type | The type of query. | Any Str | -| remote_address | The remote address of the query. | Any Str | -| id | The id of query. | Any Str | -| context | The context of the query. | Any Str | -| num_dimensions | The number of dimensions of query. | Any Str | -| interval | The interval of the query. | Any Str | -| duration | The duration of query. | Any Str | - -### apachedruid.query.failed.count - -Number of failed queries. 
- -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {queries} | Sum | Int | Delta | true | - -### apachedruid.query.interrupted.count - -Number of queries interrupted due to cancellation. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {queries} | Sum | Int | Delta | true | - -### apachedruid.query.node.backpressure - -Milliseconds that the channel to this process has spent suspended due to backpressure. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| ms | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| status | The status of the query. | Any Str | -| server | The server of the query. | Any Str | -| id | The id of query. | Any Str | - -### apachedruid.query.node.bytes - -Number of bytes returned from querying individual historical/realtime processes. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| status | The status of the query. | Any Str | -| server | The server of the query. | Any Str | -| id | The id of query. | Any Str | - -### apachedruid.query.node.time - -Milliseconds taken to query individual historical/realtime processes. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| ms | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| status | The status of the query. | Any Str | -| server | The server of the query. | Any Str | -| id | The id of query. | Any Str | - -### apachedruid.query.node.ttfb - -Time to first byte. Milliseconds elapsed until Broker starts receiving the response from individual historical/realtime processes. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| ms | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| status | The status of the query. | Any Str | -| server | The server of the query. | Any Str | -| id | The id of query. | Any Str | - -### apachedruid.query.priority - -Assigned lane and priority, only if Laning strategy is enabled. Refer to [Laning strategies](https://druid.apache.org/docs/latest/configuration#laning-strategies). - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| type | The type of query. | Any Str | -| data_source | The data source name of the query. | Any Str | -| lane | The name of query lane. | Any Str | - -### apachedruid.query.row_limit.exceeded.count - -Number of queries whose inlined subquery results exceeded the given row limit. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {queries} | Sum | Int | Delta | true | - -### apachedruid.query.segment.time - -Milliseconds taken to query individual segment. Includes time to page in the segment from disk. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| ms | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| status | The status of the query.
| Any Str | -| segment | The segment of the query. | Any Str | -| id | The id of query. | Any Str | -| vectorized | Whether query is vectorized. | Any Str | - -### apachedruid.query.segment_and_cache.time - -Milliseconds taken to query individual segment or hit the cache (if it is enabled on the Historical process). - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| ms | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| segment | The segment of the query. | Any Str | -| id | The id of query. | Any Str | - -### apachedruid.query.segments.count - -This metric is not enabled by default. See the `QueryMetrics` Interface for reference regarding enabling this metric. Number of segments that will be touched by the query. In the broker, it makes a plan to distribute the query to realtime tasks and historicals based on a snapshot of segment distribution state. If there are some segments moved after this snapshot is created, certain historicals and realtime tasks can report those segments as missing to the broker. The broker will resend the query to the new servers that serve those segments after move. In this case, those segments can be counted more than once in this metric. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {segments} | Sum | Int | Delta | true | - -### apachedruid.query.success.count - -Number of queries successfully processed. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {queries} | Sum | Int | Delta | true | - -### apachedruid.query.time - -Milliseconds taken to complete a query. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| ms | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| data_source | The data source name of the query. | Any Str | -| num_metrics | The number of metrics of the query. | Any Str | -| dimension | The dimension of the query. | Any Str | -| has_filters | Whether query has filters. | Any Str | -| threshold | The threshold of query. | Any Int | -| num_complex_metrics | The number of complex metrics. | Any Int | -| type | The type of query. | Any Str | -| remote_address | The remote address of the query. | Any Str | -| id | The id of query. | Any Str | -| context | The context of the query. | Any Str | -| num_dimensions | The number of dimensions of query. | Any Str | -| interval | The interval of the query. | Any Str | -| duration | The duration of query. | Any Str | - -### apachedruid.query.timeout.count - -Number of timed out queries. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {queries} | Sum | Int | Delta | true | - -### apachedruid.query.wait.time - -Milliseconds spent waiting for a segment to be scanned. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| ms | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| segment | The segment of the query. | Any Str | -| id | The id of query. | Any Str | - -### apachedruid.segment.added.bytes - -Size in bytes of new segments created. 
- -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The task type of the segment. | Any Str | -| data_source | The data source of the segment. | Any Str | -| group_id | The group id of segment. | Any Str | -| tags | The tags of the segment. | Any Str | -| task_id | The task id of segment. | Any Str | -| interval | The interval of segment. | Any Str | - -### apachedruid.segment.assign_skipped.count - -Number of segments that could not be assigned to any server for loading. This can occur due to replication throttling, no available disk space, or a full load queue. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {segments} | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| description | The description of segment. | Any Str | -| tier | The name of segment tier. | Any Str | -| data_source | The data source of the segment. | Any Str | - -### apachedruid.segment.assigned.count - -Number of segments assigned to be loaded in the cluster. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {segments} | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| tier | The name of segment tier. | Any Str | -| data_source | The data source of the segment. | Any Str | - -### apachedruid.segment.compacted.bytes - -Total bytes of this datasource that are already compacted with the spec set in the auto compaction config. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| By | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| data_source | The data source of the segment. | Any Str | - -### apachedruid.segment.compacted.count - -Total number of segments of this datasource that are already compacted with the spec set in the auto compaction config. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {segments} | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| data_source | The data source of the segment. | Any Str | - -### apachedruid.segment.count - -Number of used segments belonging to a data source. Emitted only for data sources to which at least one used segment belongs. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {segments} | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| priority | The priority of segment. | Any Str | -| tier | The name of segment tier. | Any Str | -| data_source | The data source of the segment. | Any Str | - -### apachedruid.segment.deleted.count - -Number of segments marked as unused due to drop rules. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {segments} | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| data_source | The data source of the segment. 
| Any Str | - -### apachedruid.segment.drop_queue.count - -Number of segments to drop. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {segments} | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| server | The server of the segment. | Any Str | - -### apachedruid.segment.drop_skipped.count - -Number of segments that could not be dropped from any server. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {segments} | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| description | The description of segment. | Any Str | -| tier | The name of segment tier. | Any Str | -| data_source | The data source of the segment. | Any Str | - -### apachedruid.segment.dropped.count - -Number of segments chosen to be dropped from the cluster due to being over-replicated. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {segments} | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| tier | The name of segment tier. | Any Str | -| data_source | The data source of the segment. | Any Str | - -### apachedruid.segment.load_queue.assigned - -Number of segments assigned for load or drop to the load queue of a server. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {segments} | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| server | The server of the segment. | Any Str | -| data_source | The data source of the segment. | Any Str | - -### apachedruid.segment.load_queue.cancelled - -Number of segment assignments that were canceled before completion. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| 1 | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| server | The server of the segment. | Any Str | -| data_source | The data source of the segment. | Any Str | - -### apachedruid.segment.load_queue.count - -Number of segments to load. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {segments} | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| server | The server of the segment. | Any Str | - -### apachedruid.segment.load_queue.failed - -Number of segment assignments that failed to complete. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| 1 | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| server | The server of the segment. | Any Str | -| data_source | The data source of the segment. | Any Str | - -### apachedruid.segment.load_queue.size - -Size in bytes of segments to load. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| server | The server of the segment. 
| Any Str | - -### apachedruid.segment.load_queue.success - -Number of segment assignments that completed successfully. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| 1 | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| server | The server of the segment. | Any Str | -| data_source | The data source of the segment. | Any Str | - -### apachedruid.segment.max - -Maximum byte limit available for segments. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -### apachedruid.segment.move_skipped.count - -Number of segments that were chosen for balancing but could not be moved. This can occur when segments are already optimally placed. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {segments} | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| description | The description of segment. | Any Str | -| tier | The name of segment tier. | Any Str | -| data_source | The data source of the segment. | Any Str | - -### apachedruid.segment.moved.bytes - -Size in bytes of segments moved/archived via the Move Task. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The task type of the segment. | Any Str | -| data_source | The data source of the segment. | Any Str | -| group_id | The group id of segment. | Any Str | -| tags | The tags of the segment. | Any Str | -| task_id | The task id of segment. | Any Str | -| interval | The interval of segment. | Any Str | - -### apachedruid.segment.moved.count - -Number of segments moved in the cluster. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {segments} | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| tier | The name of segment tier. | Any Str | -| data_source | The data source of the segment. | Any Str | - -### apachedruid.segment.nuked.bytes - -Size in bytes of segments deleted via the Kill Task. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The task type of the segment. | Any Str | -| data_source | The data source of the segment. | Any Str | -| group_id | The group id of segment. | Any Str | -| tags | The tags of the segment. | Any Str | -| task_id | The task id of segment. | Any Str | -| interval | The interval of segment. | Any Str | - -### apachedruid.segment.over_shadowed.count - -Number of segments marked as unused due to being overshadowed. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {segments} | Gauge | Int | - -### apachedruid.segment.pending_delete - -On-disk size in bytes of segments that are waiting to be cleared out. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -### apachedruid.segment.row_count.avg - -The average number of rows per segment on a historical. `SegmentStatsMonitor` must be enabled. 
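Below is a minimal, illustrative sketch of enabling the monitor mentioned above on a Historical service; the fully qualified class name is an assumption, so verify it against the Druid documentation for your version.

```properties
# Illustrative example only: add the segment stats monitor so the
# segment.row_count.* metrics are emitted. The package path is assumed.
druid.monitoring.monitors=["org.apache.druid.server.metrics.SegmentStatsMonitor"]
```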
- -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {rows} | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| priority | The priority of segment. | Any Str | -| tier | The name of segment tier. | Any Str | -| data_source | The data source of the segment. | Any Str | - -### apachedruid.segment.row_count.range.count - -The number of segments in a bucket. `SegmentStatsMonitor` must be enabled. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {segments} | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| priority | The priority of segment. | Any Str | -| tier | The name of segment tier. | Any Str | -| data_source | The data source of the segment. | Any Str | -| range | The range of segment. | Any Str | - -### apachedruid.segment.scan.active - -Number of segments currently scanned. This metric also indicates how many threads from `druid.processing.numThreads` are currently being used. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {segments} | Gauge | Int | - -### apachedruid.segment.scan.pending - -Number of segments in queue waiting to be scanned. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {segments} | Gauge | Int | - -### apachedruid.segment.size - -Total size of used segments in a data source. Emitted only for data sources to which at least one used segment belongs. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| data_source | The data source of the segment. | Any Str | - -### apachedruid.segment.skip_compact.bytes - -Total bytes of this datasource that are skipped (not eligible for auto compaction) by the auto compaction. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| data_source | The data source of the segment. | Any Str | - -### apachedruid.segment.skip_compact.count - -Total number of segments of this datasource that are skipped (not eligible for auto compaction) by the auto compaction. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {segments} | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| data_source | The data source of the segment. | Any Str | - -### apachedruid.segment.unavailable.count - -Number of unique segments left to load until all used segments are available for queries. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {segments} | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| data_source | The data source of the segment. | Any Str | - -### apachedruid.segment.under_replicated.count - -Number of segments, including replicas, left to load until all used segments are available for queries. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {segments} | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| tier | The name of segment tier. | Any Str | -| data_source | The data source of the segment. | Any Str | - -### apachedruid.segment.unneeded.count - -Number of segments dropped due to being marked as unused. 
- -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {segments} | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| tier | The name of segment tier. | Any Str | -| data_source | The data source of the segment. | Any Str | - -### apachedruid.segment.used - -Bytes used for served segments. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| priority | The priority of segment. | Any Str | -| tier | The name of segment tier. | Any Str | -| data_source | The data source of the segment. | Any Str | - -### apachedruid.segment.used_percent - -Percentage of space used by served segments. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Double | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| priority | The priority of segment. | Any Str | -| tier | The name of segment tier. | Any Str | -| data_source | The data source of the segment. | Any Str | - -### apachedruid.segment.wait_compact.bytes - -Total bytes of this datasource waiting to be compacted by the auto compaction (only consider intervals/segments that are eligible for auto compaction). - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| data_source | The data source of the segment. | Any Str | - -### apachedruid.segment.wait_compact.count - -Total number of segments of this datasource waiting to be compacted by the auto compaction (only consider intervals/segments that are eligible for auto compaction). - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {segments} | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| data_source | The data source of the segment. | Any Str | - -### apachedruid.serverview.init.time - -Time taken to initialize the broker server view. Useful to detect if brokers are taking too long to start. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| ms | Gauge | Int | - -### apachedruid.serverview.sync.healthy - -Sync status of the Broker with a segment-loading server such as a Historical or Peon. Emitted only when [HTTP-based server view](https://druid.apache.org/docs/latest/configuration#segment-management) is enabled. This metric can be used in conjunction with `serverview/sync/unstableTime` to debug slow startup of Brokers. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| tier | The name of the tier. | Any Str | -| server | The address of server. | Any Str | - -### apachedruid.serverview.sync.unstable_time - -Time in milliseconds for which the Broker has been failing to sync with a segment-loading server. Emitted only when [HTTP-based server view](https://druid.apache.org/docs/latest/configuration#segment-management) is enabled. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| ms | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| tier | The name of the tier. | Any Str | -| server | The address of server.
| Any Str | - -### apachedruid.sql_query.bytes - -Number of bytes returned in the SQL query response. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| data_source | The data source name of the query. | Any Str | -| native_query_ids | The native query ids of sql query. | Any Str | -| engine | The engine name of the sql query. | Any Str | -| remote_address | The remote address of sql query. | Any Str | -| id | The id of sql query. | Any Str | -| success | Whether sql query is successful. | Any Str | - -### apachedruid.sql_query.planning_time_ms - -Milliseconds taken to plan a SQL to native query. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| ms | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| data_source | The data source name of the query. | Any Str | -| native_query_ids | The native query ids of sql query. | Any Str | -| engine | The engine name of the sql query. | Any Str | -| remote_address | The remote address of sql query. | Any Str | -| id | The id of sql query. | Any Str | -| success | Whether sql query is successful. | Any Str | - -### apachedruid.sql_query.time - -Milliseconds taken to complete a SQL query. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| ms | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| data_source | The data source name of the query. | Any Str | -| native_query_ids | The native query ids of sql query. | Any Str | -| engine | The engine name of the sql query. | Any Str | -| remote_address | The remote address of sql query. | Any Str | -| id | The id of sql query. | Any Str | -| success | Whether sql query is successful. | Any Str | - -### apachedruid.subquery.byte_limit.count - -Number of subqueries whose results are materialized as frames (Druid's internal byte representation of rows). - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {subqueries} | Sum | Int | Delta | true | - -### apachedruid.subquery.fallback.count - -Number of subqueries which cannot be materialized as frames. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {subqueries} | Sum | Int | Delta | true | - -### apachedruid.subquery.fallback.insufficient_type.count - -Number of subqueries which cannot be materialized as frames due to insufficient type information in the row signature. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {subqueries} | Sum | Int | Delta | true | - -### apachedruid.subquery.fallback.unknown_reason.count - -Number of subqueries which cannot be materialized as frames due to other reasons. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {subqueries} | Sum | Int | Delta | true | - -### apachedruid.subquery.row_limit.count - -Number of subqueries whose results are materialized as rows (Java objects on heap).
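The subquery metrics above track the Broker's subquery materialization limits; a minimal, illustrative sketch of those limits follows (property names follow Druid's Broker configuration, the values are examples, and `maxSubqueryBytes` may not exist on older Druid releases).

```properties
# Illustrative example only: Broker limits related to the subquery.* metrics.
# Maximum number of rows a subquery may materialize on heap.
druid.server.http.maxSubqueryRows=100000
# Byte-based limit for materializing subquery results as frames
# (assumed to be available only on newer Druid releases).
druid.server.http.maxSubqueryBytes=100000000
```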
- -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {subqueries} | Sum | Int | Delta | true | - -### apachedruid.sys.cpu - -CPU used. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| cpu_time | The group name of cpu time usage. | Any Str | -| cpu_name | The group name of cpu usage. | Any Str | - -### apachedruid.sys.disk.queue - -Disk queue length. Measures number of requests waiting to be processed by disk. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| disk_name | The name of disk. | Any Str | - -### apachedruid.sys.disk.read.count - -Reads from disk. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| disk_name | The name of disk. | Any Str | - -### apachedruid.sys.disk.read.size - -Bytes read from disk. One indicator of the amount of paging occurring for segments. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| disk_name | The name of disk. | Any Str | - -### apachedruid.sys.disk.transfer_time - -Transfer time to read from or write to disk. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| ms | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| disk_name | The name of disk. | Any Str | - -### apachedruid.sys.disk.write.count - -Writes to disk. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| disk_name | The name of disk. | Any Str | - -### apachedruid.sys.disk.write.size - -Bytes written to disk. One indicator of the amount of paging occurring for segments. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| disk_name | The name of disk. | Any Str | - -### apachedruid.sys.fs.files.count - -Filesystem total IO nodes. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| fs_dir_name | The dir name. | Any Str | -| fs_dev_name | The dev name. | Any Str | - -### apachedruid.sys.fs.files.free - -Filesystem free IO nodes. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| fs_dir_name | The dir name. | Any Str | -| fs_dev_name | The dev name. | Any Str | - -### apachedruid.sys.fs.max - -Filesystem bytes max. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| fs_dir_name | The dir name. | Any Str | -| fs_dev_name | The dev name. | Any Str | - -### apachedruid.sys.fs.used - -Filesystem bytes used. 
- -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| fs_dir_name | The dir name. | Any Str | -| fs_dev_name | The dev name. | Any Str | - -### apachedruid.sys.la.1 - -System CPU load averages over past `i` minutes, where `i={1,5,15}`. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -### apachedruid.sys.la.15 - -System CPU load averages over past `i` minutes, where `i={1,5,15}`. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -### apachedruid.sys.la.5 - -System CPU load averages over past `i` minutes, where `i={1,5,15}`. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -### apachedruid.sys.mem.free - -Memory free. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -### apachedruid.sys.mem.max - -Memory max. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -### apachedruid.sys.mem.used - -Memory used. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -### apachedruid.sys.net.read.dropped - -Total packets dropped coming from network. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| net_hwaddr | The net hardware address. | Any Str | -| net_name | The name of network. | Any Str | -| net_address | The net address. | Any Str | - -### apachedruid.sys.net.read.errors - -Total network read errors. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| net_hwaddr | The net hardware address. | Any Str | -| net_name | The name of network. | Any Str | -| net_address | The net address. | Any Str | - -### apachedruid.sys.net.read.packets - -Total packets read from the network. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| net_hwaddr | The net hardware address. | Any Str | -| net_name | The name of network. | Any Str | -| net_address | The net address. | Any Str | - -### apachedruid.sys.net.read.size - -Bytes read from the network. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| net_hwaddr | The net hardware address. | Any Str | -| net_name | The name of network. | Any Str | -| net_address | The net address. | Any Str | - -### apachedruid.sys.net.write.collisions - -Total network write collisions. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| net_hwaddr | The net hardware address. | Any Str | -| net_name | The name of network. | Any Str | -| net_address | The net address. | Any Str | - -### apachedruid.sys.net.write.errors - -Total network write errors. 
- -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| net_hwaddr | The net hardware address. | Any Str | -| net_name | The name of network. | Any Str | -| net_address | The net address. | Any Str | - -### apachedruid.sys.net.write.packets - -Total packets written to the network. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| net_hwaddr | The net hardware address. | Any Str | -| net_name | The name of network. | Any Str | -| net_address | The net address. | Any Str | - -### apachedruid.sys.net.write.size - -Bytes written to the network. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| net_hwaddr | The net hardware address. | Any Str | -| net_name | The name of network. | Any Str | -| net_address | The net address. | Any Str | - -### apachedruid.sys.storage.used - -Disk space used. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| fs_dir_name | The dir name. | Any Str | - -### apachedruid.sys.swap.free - -Free swap. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -### apachedruid.sys.swap.max - -Max swap. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -### apachedruid.sys.swap.page_in - -Paged in swap. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -### apachedruid.sys.swap.page_out - -Paged out swap. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -### apachedruid.sys.tcpv4.active_opens - -Total TCP active open connections. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -### apachedruid.sys.tcpv4.attempt_fails - -Total TCP active connection failures. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -### apachedruid.sys.tcpv4.estab_resets - -Total TCP connection resets. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -### apachedruid.sys.tcpv4.in.errs - -Errors while reading segments. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -### apachedruid.sys.tcpv4.in.segs - -Total segments received in connection. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -### apachedruid.sys.tcpv4.out.rsts - -Total `out reset` packets sent to reset the connection. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -### apachedruid.sys.tcpv4.out.segs - -Total segments sent. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -### apachedruid.sys.tcpv4.passive_opens - -Total TCP passive open connections. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -### apachedruid.sys.tcpv4.retrans.segs - -Total segments re-transmitted. 
- -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -### apachedruid.sys.uptime - -Total system uptime. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| s | Gauge | Int | - -### apachedruid.task.action.batch.attempts - -Number of execution attempts for a single batch of task actions. Currently only being emitted for [batched `segmentAllocate` actions](https://druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions). - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {attempts} | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| interval | The interval of task. | Any Str | -| data_source | The data source of the task. | Any Str | -| task_action_type | The action type of task. | Any Str | - -### apachedruid.task.action.batch.queue_time - -Milliseconds spent by a batch of task actions in queue. Currently only being emitted for [batched `segmentAllocate` actions](https://druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions). - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| ms | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| interval | The interval of task. | Any Str | -| data_source | The data source of the task. | Any Str | -| task_action_type | The action type of task. | Any Str | - -### apachedruid.task.action.batch.run_time - -Milliseconds taken to execute a batch of task actions. Currently only being emitted for [batched `segmentAllocate` actions](https://druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions). - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| ms | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| interval | The interval of task. | Any Str | -| data_source | The data source of the task. | Any Str | -| task_action_type | The action type of task. | Any Str | - -### apachedruid.task.action.batch.size - -Number of task actions in a batch that was executed during the emission period. Currently only being emitted for [batched `segmentAllocate` actions](https://druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions). - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {actions} | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| interval | The interval of task. | Any Str | -| data_source | The data source of the task. | Any Str | -| task_action_type | The action type of task. | Any Str | - -### apachedruid.task.action.failed.count - -Number of task actions that failed during the emission period. Currently only being emitted for [batched `segmentAllocate` actions](https://druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions). - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {actions} | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The type of task. | Any Str | -| data_source | The data source of the task. | Any Str | -| task_action_type | The action type of task. | Any Str | -| group_id | The group id of the task. | Any Str | -| tags | The tags of task. | Any Str | -| task_id | The id of task.
| Any Str | - -### apachedruid.task.action.log.time - -Milliseconds taken to log a task action to the audit log. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| ms | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The type of task. | Any Str | -| data_source | The data source of the task. | Any Str | -| task_action_type | The action type of task. | Any Str | -| group_id | The group id of the task. | Any Str | -| tags | The tags of task. | Any Str | -| task_id | The id of task. | Any Str | - -### apachedruid.task.action.run.time - -Milliseconds taken to execute a task action. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| ms | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The type of task. | Any Str | -| data_source | The data source of the task. | Any Str | -| task_action_type | The action type of task. | Any Str | -| group_id | The group id of the task. | Any Str | -| tags | The tags of task. | Any Str | -| task_id | The id of task. | Any Str | - -### apachedruid.task.action.success.count - -Number of task actions that were executed successfully during the emission period. Currently only being emitted for [batched `segmentAllocate` actions](https://druid.apache.org/docs/latest/ingestion/tasks#batching-segmentallocate-actions). - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {actions} | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The type of task. | Any Str | -| data_source | The data source of the task. | Any Str | -| task_action_type | The action type of task. | Any Str | -| group_id | The group id of the task. | Any Str | -| tags | The tags of task. | Any Str | -| task_id | The id of task. | Any Str | - -### apachedruid.task.failed.count - -Number of failed tasks per emission period. This metric is only available if the `TaskCountStatsMonitor` module is included. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {tasks} | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| data_source | The data source of the task. | Any Str | - -### apachedruid.task.pending.count - -Number of current pending tasks. This metric is only available if the `TaskCountStatsMonitor` module is included. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {tasks} | Sum | Int | Delta | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| data_source | The data source of the task. | Any Str | - -### apachedruid.task.pending.time - -Milliseconds taken for a task to wait for running. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| ms | Gauge | Int | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| task_type | The type of task. | Any Str | -| data_source | The data source of the task. | Any Str | -| group_id | The group id of the task. | Any Str | -| tags | The tags of task. | Any Str | -| task_id | The id of task. | Any Str | - -### apachedruid.task.run.time - -Milliseconds taken to run a task.

| Unit | Metric Type | Value Type |
| ---- | ----------- | ---------- |
| ms | Gauge | Int |

#### Attributes

| Name | Description | Values |
| ---- | ----------- | ------ |
| task_type | The type of task. | Any Str |
| data_source | The data source of the task. | Any Str |
| group_id | The group id of the task. | Any Str |
| task_status | The status of the task. | Any Str |
| tags | The tags of task. | Any Str |
| task_id | The id of task. | Any Str |

### apachedruid.task.running.count

Number of current running tasks. This metric is only available if the `TaskCountStatsMonitor` module is included.

| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic |
| ---- | ----------- | ---------- | ----------------------- | --------- |
| {tasks} | Sum | Int | Delta | true |

#### Attributes

| Name | Description | Values |
| ---- | ----------- | ------ |
| data_source | The data source of the task. | Any Str |

### apachedruid.task.segment_availability.wait.time

The number of milliseconds a batch indexing task waited for newly created segments to become available for querying.

| Unit | Metric Type | Value Type |
| ---- | ----------- | ---------- |
| ms | Gauge | Int |

#### Attributes

| Name | Description | Values |
| ---- | ----------- | ------ |
| task_type | The type of task. | Any Str |
| data_source | The data source of the task. | Any Str |
| group_id | The group id of the task. | Any Str |
| segment_availability_confirmed | Whether segment availability is confirmed. | Any Str |
| tags | The tags of task. | Any Str |
| task_id | The id of task. | Any Str |

### apachedruid.task.success.count

Number of successful tasks per emission period. This metric is only available if the `TaskCountStatsMonitor` module is included.

| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic |
| ---- | ----------- | ---------- | ----------------------- | --------- |
| {tasks} | Sum | Int | Delta | true |

#### Attributes

| Name | Description | Values |
| ---- | ----------- | ------ |
| data_source | The data source of the task. | Any Str |

### apachedruid.task.waiting.count

Number of current waiting tasks. This metric is only available if the `TaskCountStatsMonitor` module is included.

| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic |
| ---- | ----------- | ---------- | ----------------------- | --------- |
| {tasks} | Sum | Int | Delta | true |

#### Attributes

| Name | Description | Values |
| ---- | ----------- | ------ |
| data_source | The data source of the task. | Any Str |

### apachedruid.task_slot.blacklisted.count

Number of total task slots in blacklisted Middle Managers and Indexers per emission period. This metric is only available if the `TaskSlotCountStatsMonitor` module is included.

| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic |
| ---- | ----------- | ---------- | ----------------------- | --------- |
| {slots} | Sum | Int | Delta | true |

#### Attributes

| Name | Description | Values |
| ---- | ----------- | ------ |
| category | The category of task slot. | Any Str |

### apachedruid.task_slot.idle.count

Number of idle task slots per emission period. This metric is only available if the `TaskSlotCountStatsMonitor` module is included.

| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic |
| ---- | ----------- | ---------- | ----------------------- | --------- |
| {slots} | Sum | Int | Delta | true |

#### Attributes

| Name | Description | Values |
| ---- | ----------- | ------ |
| category | The category of task slot. | Any Str |

### apachedruid.task_slot.lazy.count

Number of total task slots in lazy marked Middle Managers and Indexers per emission period. This metric is only available if the `TaskSlotCountStatsMonitor` module is included.

| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic |
| ---- | ----------- | ---------- | ----------------------- | --------- |
| {slots} | Sum | Int | Delta | true |

#### Attributes

| Name | Description | Values |
| ---- | ----------- | ------ |
| category | The category of task slot. | Any Str |

### apachedruid.task_slot.total.count

Number of total task slots per emission period. This metric is only available if the `TaskSlotCountStatsMonitor` module is included.

| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic |
| ---- | ----------- | ---------- | ----------------------- | --------- |
| {slots} | Sum | Int | Delta | true |

#### Attributes

| Name | Description | Values |
| ---- | ----------- | ------ |
| category | The category of task slot. | Any Str |

### apachedruid.task_slot.used.count

Number of busy task slots per emission period. This metric is only available if the `TaskSlotCountStatsMonitor` module is included.

| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic |
| ---- | ----------- | ---------- | ----------------------- | --------- |
| {slots} | Sum | Int | Delta | true |

#### Attributes

| Name | Description | Values |
| ---- | ----------- | ------ |
| category | The category of task slot. | Any Str |

### apachedruid.tier.historical.count

Number of available historical nodes in each tier.

| Unit | Metric Type | Value Type |
| ---- | ----------- | ---------- |
| 1 | Gauge | Int |

#### Attributes

| Name | Description | Values |
| ---- | ----------- | ------ |
| tier | The name of tier. | Any Str |

### apachedruid.tier.replication.factor

Configured maximum replication factor in each tier.

| Unit | Metric Type | Value Type |
| ---- | ----------- | ---------- |
| 1 | Gauge | Int |

#### Attributes

| Name | Description | Values |
| ---- | ----------- | ------ |
| tier | The name of tier. | Any Str |

### apachedruid.tier.required.capacity

Total capacity in bytes required in each tier.

| Unit | Metric Type | Value Type |
| ---- | ----------- | ---------- |
| By | Gauge | Int |

#### Attributes

| Name | Description | Values |
| ---- | ----------- | ------ |
| tier | The name of tier. | Any Str |

### apachedruid.tier.total.capacity

Total capacity in bytes available in each tier.

| Unit | Metric Type | Value Type |
| ---- | ----------- | ---------- |
| By | Gauge | Int |

#### Attributes

| Name | Description | Values |
| ---- | ----------- | ------ |
| tier | The name of tier. | Any Str |

### apachedruid.worker.task.failed.count

Number of failed tasks run on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included, and is only supported for Middle Manager nodes.

| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic |
| ---- | ----------- | ---------- | ----------------------- | --------- |
| {tasks} | Sum | Int | Delta | true |

#### Attributes

| Name | Description | Values |
| ---- | ----------- | ------ |
| category | The category of worker. | Any Str |
| worker_version | The version of worker. | Any Str |

### apachedruid.worker.task.success.count

Number of successful tasks run on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included, and is only supported for Middle Manager nodes.

| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic |
| ---- | ----------- | ---------- | ----------------------- | --------- |
| {tasks} | Sum | Int | Delta | true |

#### Attributes

| Name | Description | Values |
| ---- | ----------- | ------ |
| category | The category of worker. | Any Str |
| worker_version | The version of worker. | Any Str |

### apachedruid.worker.task_slot.idle.count

Number of idle task slots on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included, and is only supported for Middle Manager nodes.

| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic |
| ---- | ----------- | ---------- | ----------------------- | --------- |
| {slots} | Sum | Int | Delta | true |

#### Attributes

| Name | Description | Values |
| ---- | ----------- | ------ |
| category | The category of worker. | Any Str |
| worker_version | The version of worker. | Any Str |

### apachedruid.worker.task_slot.total.count

Number of total task slots on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included.

| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic |
| ---- | ----------- | ---------- | ----------------------- | --------- |
| {slots} | Sum | Int | Delta | true |

#### Attributes

| Name | Description | Values |
| ---- | ----------- | ------ |
| category | The category of worker. | Any Str |
| worker_version | The version of worker. | Any Str |

### apachedruid.worker.task_slot.used.count

Number of busy task slots on the reporting worker per emission period. This metric is only available if the `WorkerTaskCountStatsMonitor` module is included.

| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic |
| ---- | ----------- | ---------- | ----------------------- | --------- |
| {slots} | Sum | Int | Delta | true |

#### Attributes

| Name | Description | Values |
| ---- | ----------- | ------ |
| category | The category of worker. | Any Str |
| worker_version | The version of worker. | Any Str |

### apachedruid.zk.connected

Indicator of connection status. `1` for connected, `0` for disconnected. Emitted once per monitor period.

| Unit | Metric Type | Value Type |
| ---- | ----------- | ---------- |
| 1 | Gauge | Int |

### apachedruid.zk.reconnect.time

Amount of time, in milliseconds, that a server was disconnected from ZooKeeper before reconnecting. Emitted on reconnection. Not emitted if connection to ZooKeeper is permanently lost, because in this case, there is no reconnection.

| Unit | Metric Type | Value Type |
| ---- | ----------- | ---------- |
| ms | Gauge | Int |

## Resource Attributes

| Name | Description | Values | Enabled |
| ---- | ----------- | ------ | ------- |
| apachedruid.cluster.name | The name of the apachedruid cluster. | Any Str | true |
| apachedruid.node.host | The name of the apachedruid node. | Any Str | true |
| apachedruid.node.service | The service name of the apachedruid node. | Any Str | true |

From de5f6775e09baacca7562b276ea9a285bf6f2871 Mon Sep 17 00:00:00 2001
From: Yuanli Han
Date: Mon, 25 Mar 2024 16:42:30 +0800
Subject: [PATCH 8/8] use localhost

---
 receiver/apachedruidreceiver/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/receiver/apachedruidreceiver/README.md b/receiver/apachedruidreceiver/README.md
index 290f63dc951ae..8b17a0a73ece6 100644
--- a/receiver/apachedruidreceiver/README.md
+++ b/receiver/apachedruidreceiver/README.md
@@ -33,7 +33,7 @@ Example:
 ```yaml
 receivers:
   apachedruid:
-    endpoint: 0.0.0.0:9000
+    endpoint: localhost:9000
 ```

 ## Metrics
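
The README hunk above only shows the receiver's `endpoint` setting. As a rough end-to-end illustration, a collector configuration that wires the receiver into a metrics pipeline could look like the sketch below. Everything beyond `endpoint: localhost:9000` is an assumption: the `debug` exporter is just a convenient sink, and the `metrics:` toggle presumes the receiver exposes the usual per-metric `enabled` switches implied by the generated metadata config files in this patch.

```yaml
# Sketch only. Assumes the receiver needs nothing beyond the endpoint shown in the
# README, and that the mdatagen-style per-metric `enabled` toggles from the generated
# config in this patch are wired into the receiver's configuration.
receivers:
  apachedruid:
    endpoint: localhost:9000
    metrics:
      apachedruid.task.run.time:
        enabled: true   # assumed toggle; noisy metrics could be disabled the same way

exporters:
  debug:
    verbosity: detailed # print incoming Druid metrics to the collector log

service:
  pipelines:
    metrics:
      receivers: [apachedruid]
      exporters: [debug]
```

On the Druid side, something still has to emit metrics at that address; typically that means enabling the relevant monitors (for example `TaskCountStatsMonitor` or `WorkerTaskCountStatsMonitor`, as the metric descriptions above note) via `druid.monitoring.monitors` and pointing Druid's HTTP emitter (`druid.emitter=http`, `druid.emitter.http.recipientBaseUrl`) at the collector. Those Druid-side settings are not part of this patch.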