move collectd-apache tests to use golden files. #4534

Merged
merged 2 commits on Mar 26, 2024
3 changes: 2 additions & 1 deletion docker/kafka/scripts/run-broker.sh
@@ -6,4 +6,5 @@ if [[ -z "$KAFKA_ZOOKEEPER_CONNECT" ]]; then
  exit 1
fi
"$KAFKA_BIN"/kafka-server-start.sh "${KAFKA_BIN}/../config/server.properties" \
  --override zookeeper.connect="$KAFKA_ZOOKEEPER_CONNECT"
  --override zookeeper.connect="$KAFKA_ZOOKEEPER_CONNECT" \
  --override log.segment.bytes="100"
3 changes: 1 addition & 2 deletions docker/kafka/scripts/run-consumer.sh
@@ -8,5 +8,4 @@ fi

"$KAFKA_BIN"/kafka-console-consumer.sh \
  --bootstrap-server "$KAFKA_BROKER" \
  --topic sfx-employee \
  --max-messages $((10 + RANDOM % 100))
Contributor
Seems like important tribal knowledge: any reason why we're removing the --max-messages flag? A short description would suffice; I'm not worried about shipping this modified test as proposed.

Contributor
@samiura Mar 26, 2024

I think this relates to @atoulme's Slack message gist.
The Kafka tests would not detect some of Kafka's metrics because those depend on the activity of Kafka itself. There were multiple things to it:
The message sender had a random number of messages to send: it would send 10 plus at most 100 more.
The broker would only emit metrics related to its roll stats if it actually rolled a segment.

In other words, this makes Kafka's behavior more deterministic.

Contributor Author
beat me to it

--topic sfx-employee
@@ -17,19 +17,80 @@
package tests

import (
	"context"
	"fmt"
	"path/filepath"
	"runtime"
	"testing"
	"time"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden"
	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.opentelemetry.io/collector/component/componenttest"
	"go.opentelemetry.io/collector/consumer/consumertest"
	"go.opentelemetry.io/collector/receiver/otlpreceiver"
	"go.opentelemetry.io/collector/receiver/receivertest"
	"go.uber.org/zap"

	"github.com/signalfx/splunk-otel-collector/tests/testutils"
)

func TestCollectdApacheReceiverProvidesAllMetrics(t *testing.T) {
	testutils.AssertAllMetricsReceived(
		t, "all.yaml", "all_metrics_config.yaml", nil, nil,
	)
	checkGoldenFile(t, "all_metrics_config.yaml", "expected_all.yaml",
		pmetrictest.IgnoreMetricAttributeValue("host"),
		pmetrictest.IgnoreMetricsOrder(),
		pmetrictest.IgnoreMetricValues(),
		pmetrictest.IgnoreTimestamp())
}

func TestCollectdApacheReceiverProvidesDefaultMetrics(t *testing.T) {
	testutils.AssertAllMetricsReceived(
		t, "default.yaml", "default_metrics_config.yaml", nil, nil,
	)
	checkGoldenFile(t, "default_metrics_config.yaml", "expected_default.yaml",
		pmetrictest.IgnoreMetricAttributeValue("host"),
		pmetrictest.IgnoreMetricsOrder(),
		pmetrictest.IgnoreMetricValues(),
		pmetrictest.IgnoreTimestamp())
}

// checkGoldenFile starts an in-process OTLP receiver, runs the collector container
// with the given config, and compares the received metrics against the golden file.
func checkGoldenFile(t *testing.T, configFile string, expectedFilePath string, options ...pmetrictest.CompareMetricsOption) {
	// Stand up an OTLP gRPC receiver on a free local port to capture the container's output.
	f := otlpreceiver.NewFactory()
	port := testutils.GetAvailablePort(t)
	c := f.CreateDefaultConfig().(*otlpreceiver.Config)
	c.GRPC.NetAddr.Endpoint = fmt.Sprintf("localhost:%d", port)
	sink := &consumertest.MetricsSink{}
	receiver, err := f.CreateMetricsReceiver(context.Background(), receivertest.NewNopCreateSettings(), c, sink)
	require.NoError(t, err)
	require.NoError(t, receiver.Start(context.Background(), componenttest.NewNopHost()))
	t.Cleanup(func() {
		require.NoError(t, receiver.Shutdown(context.Background()))
	})
	logger, _ := zap.NewDevelopment()

	dockerHost := "0.0.0.0"
	if runtime.GOOS == "darwin" {
		dockerHost = "host.docker.internal"
	}
	// Run the collector in a container, pointing its otlp exporter at the local receiver.
	p, err := testutils.NewCollectorContainer().
		WithConfigPath(filepath.Join("testdata", configFile)).
		WithLogger(logger).
		WithEnv(map[string]string{"OTLP_ENDPOINT": fmt.Sprintf("%s:%d", dockerHost, port)}).
		Build()
	require.NoError(t, err)
	require.NoError(t, p.Start())
	t.Cleanup(func() {
		require.NoError(t, p.Shutdown())
	})

	expected, err := golden.ReadMetrics(filepath.Join("testdata", expectedFilePath))
	require.NoError(t, err)

	// Compare the most recent payload against the golden file, retrying for up to 30 seconds.
	assert.EventuallyWithT(t, func(tt *assert.CollectT) {
		if len(sink.AllMetrics()) == 0 {
			assert.Fail(tt, "No metrics collected")
			return
		}
		err := pmetrictest.CompareMetrics(expected, sink.AllMetrics()[len(sink.AllMetrics())-1], options...)
		assert.NoError(tt, err)
	}, 30*time.Second, 1*time.Second)
}
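For reference, a minimal sketch of how the expected_*.yaml golden files could be regenerated from a captured payload. This assumes the same pkg/golden module used above also exposes a WriteMetrics(tb, path, metrics) helper; that signature and the writeGoldenFromSink name are assumptions for illustration, not part of this PR, and the sketch reuses the imports already present in the test file.

// writeGoldenFromSink is a hypothetical helper (not in this PR) that persists the
// most recent payload from the sink as a golden file.
// Assumption: golden.WriteMetrics(tb testing.TB, filePath string, m pmetric.Metrics)
// exists in github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden.
func writeGoldenFromSink(t *testing.T, sink *consumertest.MetricsSink, expectedFilePath string) {
	all := sink.AllMetrics()
	require.NotEmpty(t, all, "no metrics collected; nothing to write")
	// Write the latest payload to testdata/ as the new expected golden file.
	require.NoError(t, golden.WriteMetrics(t, filepath.Join("testdata", expectedFilePath), all[len(all)-1]))
}

Running checkGoldenFile once with this helper in place of the CompareMetrics assertion would refresh the testdata files; the committed tests then only read and compare them.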
@@ -6,6 +6,9 @@ receivers:
    extraMetrics: ["*"]
    intervalSeconds: 1

processors:
  batch:

exporters:
  otlp:
    endpoint: "${OTLP_ENDPOINT}"
@@ -17,4 +20,5 @@ service:
    metrics:
      receivers:
        - smartagent/collectd_apache
      processors: [batch]
      exporters: [otlp]
@@ -4,6 +4,8 @@ receivers:
    host: localhost
    port: 18080
    intervalSeconds: 1
processors:
  batch:

exporters:
  otlp:
@@ -16,4 +18,5 @@ service:
    metrics:
      receivers:
        - smartagent/collectd_apache
      processors: [batch]
      exporters: [otlp]