From a3f97e6209224979c39e5ef6b991039d02dd3705 Mon Sep 17 00:00:00 2001 From: shalper2 Date: Thu, 18 Jan 2024 13:34:58 -0600 Subject: [PATCH 01/31] updated client/scraper --- receiver/splunkenterprisereceiver/client.go | 58 ++++++++++--- receiver/splunkenterprisereceiver/config.go | 37 +++++++-- .../splunkenterprisereceiver/config_test.go | 83 ++++++++++++++++++- .../generated_component_test.go | 14 +++- .../internal/metadata/generated_config.go | 2 +- receiver/splunkenterprisereceiver/scraper.go | 14 +++- .../testdata/config.yaml | 2 +- 7 files changed, 184 insertions(+), 26 deletions(-) diff --git a/receiver/splunkenterprisereceiver/client.go b/receiver/splunkenterprisereceiver/client.go index dcd1ee89f48c4..12b16b5f84367 100644 --- a/receiver/splunkenterprisereceiver/client.go +++ b/receiver/splunkenterprisereceiver/client.go @@ -5,6 +5,7 @@ package splunkenterprisereceiver // import "github.com/open-telemetry/openteleme import ( "context" + "errors" "fmt" "net/http" "net/url" @@ -13,32 +14,63 @@ import ( "go.opentelemetry.io/collector/component" ) +// Indexer type "enum". 
Included in context sent from scraper functions +const ( + typeIdx = "IDX" + typeSh = "SH" + typeCm = "CM" +) + +var ( + errCtxMissingEndpointType = errors.New("context was passed without the endpoint type included") +) + +// Type wrapper for accessing context value +type endpointType string + type splunkEntClient struct { - client *http.Client - endpoint *url.URL + client *http.Client + endpoints map[any]*url.URL } -func newSplunkEntClient(cfg *Config, h component.Host, s component.TelemetrySettings) (*splunkEntClient, error) { - client, err := cfg.ClientConfig.ToClient(h, s) +func newSplunkEntClient(ctx context.Context, cfg *Config, h component.Host, s component.TelemetrySettings) (*splunkEntClient, error) { + endpoints := make(map[any]*url.URL) + client, err := cfg.HTTPClientSettings.ToClient(h, s) if err != nil { return nil, err } - endpoint, _ := url.Parse(cfg.Endpoint) + // if the endpoint is defined, put it in the endpoints map for later use + // we already checked that url.Parse does not fail in cfg.Validate() + if cfg.IdxEndpoint != "" { + endpoints[typeIdx], _ = url.Parse(cfg.IdxEndpoint) + } + if cfg.SHEndpoint != "" { + endpoints[typeSh], _ = url.Parse(cfg.SHEndpoint) + } + if cfg.CMEndpoint != "" { + endpoints[typeCm], _ = url.Parse(cfg.CMEndpoint) + } return &splunkEntClient{ - client: client, - endpoint: endpoint, + client: client, + endpoints: endpoints, }, nil } // For running ad hoc searches only func (c *splunkEntClient) createRequest(ctx context.Context, sr *searchResponse) (*http.Request, error) { + // get endpoint type from the context + eptType := ctx.Value(endpointType("type")) + if eptType == nil { + return nil, errCtxMissingEndpointType + } + // Running searches via Splunk's REST API is a two step process: First you submit the job to run // this returns a jobid which is then used in the second part to retrieve the search results if sr.Jobid == nil { path := "/services/search/jobs/" - url, _ := url.JoinPath(c.endpoint.String(), path) + url, 
_ := url.JoinPath(c.endpoints[eptType].String(), path) // reader for the response data data := strings.NewReader(sr.search) @@ -52,7 +84,7 @@ func (c *splunkEntClient) createRequest(ctx context.Context, sr *searchResponse) return req, nil } path := fmt.Sprintf("/services/search/jobs/%s/results", *sr.Jobid) - url, _ := url.JoinPath(c.endpoint.String(), path) + url, _ := url.JoinPath(c.endpoints[eptType].String(), path) req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) if err != nil { @@ -63,7 +95,13 @@ func (c *splunkEntClient) createRequest(ctx context.Context, sr *searchResponse) } func (c *splunkEntClient) createAPIRequest(ctx context.Context, apiEndpoint string) (*http.Request, error) { - url := c.endpoint.String() + apiEndpoint + // get endpoint type from the context + eptType := ctx.Value(endpointType("type")) + if eptType == nil { + return nil, errCtxMissingEndpointType + } + + url := c.endpoints[eptType].String() + apiEndpoint req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) if err != nil { diff --git a/receiver/splunkenterprisereceiver/config.go b/receiver/splunkenterprisereceiver/config.go index 9720851aa21de..996906d29a75d 100644 --- a/receiver/splunkenterprisereceiver/config.go +++ b/receiver/splunkenterprisereceiver/config.go @@ -16,6 +16,7 @@ import ( ) var ( + errUnspecifiedEndpoint = errors.New("endpoint to an unspecified node type") errBadOrMissingEndpoint = errors.New("missing a valid endpoint") errBadScheme = errors.New("endpoint scheme must be either http or https") errMissingAuthExtension = errors.New("auth extension missing from config") @@ -25,24 +26,42 @@ type Config struct { confighttp.ClientConfig `mapstructure:",squash"` scraperhelper.ScraperControllerSettings `mapstructure:",squash"` metadata.MetricsBuilderConfig `mapstructure:",squash"` + IdxEndpoint string `mapstructure:"idx_endpoint"` + SHEndpoint string `mapstructure:"sh_endpoint"` + CMEndpoint string `mapstructure:"cm_endpoint"` } func (cfg 
*Config) Validate() (errors error) { var targetURL *url.URL + var err error + endpoints := []string{} - if cfg.Endpoint == "" { + if cfg.Endpoint != "" { + errors = multierr.Append(errors, errUnspecifiedEndpoint) + } else if cfg.IdxEndpoint == "" && cfg.SHEndpoint == "" && cfg.CMEndpoint == "" { errors = multierr.Append(errors, errBadOrMissingEndpoint) } else { - // we want to validate that the endpoint url supplied by user is at least - // a little bit valid - var err error - targetURL, err = url.Parse(cfg.Endpoint) - if err != nil { - errors = multierr.Append(errors, errBadOrMissingEndpoint) + if cfg.IdxEndpoint != "" { + endpoints = append(endpoints, cfg.IdxEndpoint) } + if cfg.SHEndpoint != "" { + endpoints = append(endpoints, cfg.SHEndpoint) + } + if cfg.CMEndpoint != "" { + endpoints = append(endpoints, cfg.CMEndpoint) + } + + for _, e := range endpoints { + targetURL, err = url.Parse(e) + if err != nil { + errors = multierr.Append(errors, errBadOrMissingEndpoint) + continue + } - if !strings.HasPrefix(targetURL.Scheme, "http") { - errors = multierr.Append(errors, errBadScheme) + // note passes for both http and https + if !strings.HasPrefix(targetURL.Scheme, "http") { + errors = multierr.Append(errors, errBadScheme) + } } } diff --git a/receiver/splunkenterprisereceiver/config_test.go b/receiver/splunkenterprisereceiver/config_test.go index 8079fc04a3e06..9ec2adcb8a0bc 100644 --- a/receiver/splunkenterprisereceiver/config_test.go +++ b/receiver/splunkenterprisereceiver/config_test.go @@ -9,13 +9,14 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configauth" + "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/confmap/confmaptest" + "go.uber.org/multierr" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" ) -// Since there are no custom fields in config the existing tests for the 
components should -// cover the testing requirement. func TestLoadConfig(t *testing.T) { t.Parallel() @@ -25,3 +26,81 @@ func TestLoadConfig(t *testing.T) { _, err = cm.Sub(id.String()) require.NoError(t, err) } + +func TestEndpointCorrectness(t *testing.T) { + // Declare errors for tests that should fail + var errBad, errMisconf, errScheme error + // Error for bad or missing endpoint + errBad = multierr.Append(errBad, errBadOrMissingEndpoint) + // There is no way with the current SDK design to create a test config that + // satisfies the auth extention so we will just expect this error to appear. + errBad = multierr.Append(errBad, errMissingAuthExtension) + + // Errors related to setting the wrong endpoint field (i.e. the one from httpconfig) + errMisconf = multierr.Append(errMisconf, errUnspecifiedEndpoint) + errMisconf = multierr.Append(errMisconf, errMissingAuthExtension) + + // Error related to bad scheme (not http/s) + errScheme = multierr.Append(errScheme, errBadScheme) + errScheme = multierr.Append(errScheme, errMissingAuthExtension) + + httpCfg := confighttp.NewDefaultHTTPClientSettings() + httpCfg.Auth = &configauth.Authentication{AuthenticatorID: component.NewID("dummy")} + httpCfgWithEndpoint := httpCfg + httpCfgWithEndpoint.Endpoint = "https://123.123.32.2:2093" + httpCfgWithEndpoint.Auth = &configauth.Authentication{AuthenticatorID: component.NewID("dummy")} + + tests := []struct { + desc string + expected error + config *Config + }{ + { + desc: "missing any endpoint setting", + expected: errBad, + config: &Config{ + HTTPClientSettings: httpCfg, + }, + }, + { + desc: "configured the wrong endpoint field (httpconfig.Endpoint)", + expected: errMisconf, + config: &Config{ + HTTPClientSettings: httpCfgWithEndpoint, + }, + }, + { + desc: "properly configured invalid endpoint", + expected: errBad, + config: &Config{ + HTTPClientSettings: httpCfg, + IdxEndpoint: "123.12.23.43:80", + }, + }, + { + desc: "properly configured endpoint has bad scheme", + 
expected: errScheme, + config: &Config{ + HTTPClientSettings: httpCfg, + IdxEndpoint: "gss://123.124.32.12:90", + }, + }, + { + desc: "properly configured endpoint", + expected: errMissingAuthExtension, + config: &Config{ + HTTPClientSettings: httpCfg, + IdxEndpoint: "https://123.123.32.2:2093", + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + err := test.config.Validate() + t.Logf("%v\n", err) + require.Error(t, err) + require.Contains(t, test.expected.Error(), err.Error()) + }) + } +} diff --git a/receiver/splunkenterprisereceiver/generated_component_test.go b/receiver/splunkenterprisereceiver/generated_component_test.go index 56a98e36226ea..0bd39f68d6d7c 100644 --- a/receiver/splunkenterprisereceiver/generated_component_test.go +++ b/receiver/splunkenterprisereceiver/generated_component_test.go @@ -25,7 +25,19 @@ type assertNoErrorHost struct { var _ component.Host = (*assertNoErrorHost)(nil) -func TestComponentLifecycle(t *testing.T) { +// newAssertNoErrorHost returns a new instance of assertNoErrorHost. 
+func newAssertNoErrorHost(t *testing.T) component.Host { + return &assertNoErrorHost{ + componenttest.NewNopHost(), + t, + } +} + +func (aneh *assertNoErrorHost) ReportFatalError(err error) { + assert.NoError(aneh, err) +} + +func Test_ComponentLifecycle(t *testing.T) { factory := NewFactory() tests := []struct { diff --git a/receiver/splunkenterprisereceiver/internal/metadata/generated_config.go b/receiver/splunkenterprisereceiver/internal/metadata/generated_config.go index 5b80b4fb6dcc1..7e9248ece59b9 100644 --- a/receiver/splunkenterprisereceiver/internal/metadata/generated_config.go +++ b/receiver/splunkenterprisereceiver/internal/metadata/generated_config.go @@ -15,7 +15,7 @@ func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error { if parser == nil { return nil } - err := parser.Unmarshal(ms) + err := parser.Unmarshal(ms, confmap.WithErrorUnused()) if err != nil { return err } diff --git a/receiver/splunkenterprisereceiver/scraper.go b/receiver/splunkenterprisereceiver/scraper.go index 59a2b5bdb6fe0..c9a5d8647e9b0 100644 --- a/receiver/splunkenterprisereceiver/scraper.go +++ b/receiver/splunkenterprisereceiver/scraper.go @@ -43,8 +43,8 @@ func newSplunkMetricsScraper(params receiver.CreateSettings, cfg *Config) splunk } // Create a client instance and add to the splunkScraper -func (s *splunkScraper) start(_ context.Context, h component.Host) (err error) { - client, err := newSplunkEntClient(s.conf, h, s.settings) +func (s *splunkScraper) start(ctx context.Context, h component.Host) (err error) { + client, err := newSplunkEntClient(ctx, s.conf, h, s.settings) if err != nil { return err } @@ -82,6 +82,7 @@ func (s *splunkScraper) scrape(ctx context.Context) (pmetric.Metrics, error) { // Each metric has its own scrape function associated with it func (s *splunkScraper) scrapeLicenseUsageByIndex(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { + ctx = context.WithValue(ctx, endpointType("type"), typeSh) var sr searchResponse 
// Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding @@ -1041,6 +1042,7 @@ func unmarshallSearchReq(res *http.Response, sr *searchResponse) error { // Scrape index throughput introspection endpoint func (s *splunkScraper) scrapeIndexThroughput(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it indexThroughput var ept string @@ -1082,6 +1084,7 @@ func (s *splunkScraper) scrapeIndexThroughput(ctx context.Context, now pcommon.T // Scrape indexes extended total size func (s *splunkScraper) scrapeIndexesTotalSize(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IndexesExtended var ept string @@ -1136,6 +1139,7 @@ func (s *splunkScraper) scrapeIndexesTotalSize(ctx context.Context, now pcommon. // Scrape indexes extended total event count func (s *splunkScraper) scrapeIndexesEventCount(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IndexesExtended var ept string @@ -1183,6 +1187,7 @@ func (s *splunkScraper) scrapeIndexesEventCount(ctx context.Context, now pcommon // Scrape indexes extended total bucket count func (s *splunkScraper) scrapeIndexesBucketCount(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IndexesExtended var ept string @@ -1236,6 +1241,7 @@ func (s *splunkScraper) scrapeIndexesBucketCount(ctx context.Context, now pcommo // Scrape indexes extended raw size func (s *splunkScraper) scrapeIndexesRawSize(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IndexesExtended var ept string @@ -1289,6 +1295,7 
@@ func (s *splunkScraper) scrapeIndexesRawSize(ctx context.Context, now pcommon.Ti // Scrape indexes extended bucket event count func (s *splunkScraper) scrapeIndexesBucketEventCount(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IndexesExtended var ept string @@ -1359,6 +1366,7 @@ func (s *splunkScraper) scrapeIndexesBucketEventCount(ctx context.Context, now p // Scrape indexes extended bucket hot/warm count func (s *splunkScraper) scrapeIndexesBucketHotWarmCount(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IndexesExtended var ept string @@ -1422,6 +1430,7 @@ func (s *splunkScraper) scrapeIndexesBucketHotWarmCount(ctx context.Context, now // Scrape introspection queues func (s *splunkScraper) scrapeIntrospectionQueues(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IntrospectionQueues var ept string @@ -1470,6 +1479,7 @@ func (s *splunkScraper) scrapeIntrospectionQueues(ctx context.Context, now pcomm // Scrape introspection queues bytes func (s *splunkScraper) scrapeIntrospectionQueuesBytes(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IntrospectionQueues var ept string diff --git a/receiver/splunkenterprisereceiver/testdata/config.yaml b/receiver/splunkenterprisereceiver/testdata/config.yaml index 48bf9742b4168..c35188fd18481 100644 --- a/receiver/splunkenterprisereceiver/testdata/config.yaml +++ b/receiver/splunkenterprisereceiver/testdata/config.yaml @@ -7,7 +7,7 @@ basicauth/client: splunkenterprise: # required settings auth: basicauth/client # must use basicauthextension - endpoint: "https://localhost:8089" # Optional settings + idx_endpoint: 
"https://localhost:8089" # Optional settings collection_interval: 10s timeout: 11s # Also optional: metric settings From d22d9200470526f4db250e8c5f000a8e436295b6 Mon Sep 17 00:00:00 2001 From: shalper2 Date: Thu, 18 Jan 2024 16:55:20 -0600 Subject: [PATCH 02/31] refactored client --- receiver/splunkenterprisereceiver/README.md | 19 +++-- receiver/splunkenterprisereceiver/client.go | 91 ++++++++++++++------- receiver/splunkenterprisereceiver/config.go | 38 ++++++--- 3 files changed, 102 insertions(+), 46 deletions(-) diff --git a/receiver/splunkenterprisereceiver/README.md b/receiver/splunkenterprisereceiver/README.md index e45319366868a..7b74db84487ed 100644 --- a/receiver/splunkenterprisereceiver/README.md +++ b/receiver/splunkenterprisereceiver/README.md @@ -8,7 +8,7 @@ jobs. ## Configuration -The following settings are required, omitting them will either cause your receiver to fail to compile or result in 4/5xx return codes during scraping. +The following settings are required, omitting them will either cause your receiver to fail to compile or result in 4/5xx return codes during scraping. These must be set for each Splunk instance type (indexer, search head, or cluster master) from which you wish to pull metrics. At present, only one of each type is accepted, which if done properly should allow for deployment wide metrics to be gathered. * `basicauth` (from [basicauthextension](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/extension/basicauthextension)): A configured stanza for the basicauthextension. * `auth` (no default): String name referencing your auth extension. 
@@ -23,16 +23,25 @@ Example: ```yaml extensions: - basicauth/client: + basicauth/indexer: + client_auth: + username: admin + password: securityFirst + basicauth/cluster_master: client_auth: username: admin password: securityFirst receivers: splunkenterprise: - auth: basicauth/client - endpoint: "https://localhost:8089" - timeout: 45s + indexer: + auth: basicauth/indexer + endpoint: "https://localhost:8089" + timeout: 45s + cluster_master: + auth: basicauth/cluster_master + endpoint: "https://localhost:8089" + timeout: 45s ``` For a full list of settings exposed by this receiver please look [here](./config.go) with a detailed configuration [here](./testdata/config.yaml). diff --git a/receiver/splunkenterprisereceiver/client.go b/receiver/splunkenterprisereceiver/client.go index 12b16b5f84367..ec37ecbf60e86 100644 --- a/receiver/splunkenterprisereceiver/client.go +++ b/receiver/splunkenterprisereceiver/client.go @@ -23,39 +23,66 @@ const ( var ( errCtxMissingEndpointType = errors.New("context was passed without the endpoint type included") + errEndpointTypeNotFound = errors.New("requested client is not configured and could not be found in splunkEntClient") ) // Type wrapper for accessing context value type endpointType string -type splunkEntClient struct { - client *http.Client - endpoints map[any]*url.URL +// The splunkEntClient is made up of a number of splunkClients defined for each configured endpoint +type splunkEntClient map[any]*splunkClient + +// The client does not carry the endpoint that is configured with it and golang does not support mixed +// type arrays so this struct contains the pair: the client configured for the endpoint and the endpoint +// itself +type splunkClient struct { + client *http.Client + endpoint *url.URL } func newSplunkEntClient(ctx context.Context, cfg *Config, h component.Host, s component.TelemetrySettings) (*splunkEntClient, error) { - endpoints := make(map[any]*url.URL) - client, err := cfg.HTTPClientSettings.ToClient(h, s) - if 
err != nil { - return nil, err - } + var err error + var e *url.URL + var c *http.Client + splunkEntClient := make(splunkEntClient) // if the endpoint is defined, put it in the endpoints map for later use // we already checked that url.Parse does not fail in cfg.Validate() - if cfg.IdxEndpoint != "" { - endpoints[typeIdx], _ = url.Parse(cfg.IdxEndpoint) + if cfg.IdxEndpoint.Endpoint != "" { + e, _ = url.Parse(cfg.IdxEndpoint.Endpoint) + c, err = cfg.IdxEndpoint.ToClient(h, s) + if err != nil { + return nil, err + } + splunkEntClient[typeIdx] = &splunkClient{ + client: c, + endpoint: e, + } } - if cfg.SHEndpoint != "" { - endpoints[typeSh], _ = url.Parse(cfg.SHEndpoint) + if cfg.SHEndpoint.Endpoint != "" { + e, _ = url.Parse(cfg.SHEndpoint.Endpoint) + c, err = cfg.SHEndpoint.ToClient(h, s) + if err != nil { + return nil, err + } + splunkEntClient[typeSh] = &splunkClient{ + client: c, + endpoint: e, + } } - if cfg.CMEndpoint != "" { - endpoints[typeCm], _ = url.Parse(cfg.CMEndpoint) + if cfg.CMEndpoint.Endpoint != "" { + e, _ = url.Parse(cfg.CMEndpoint.Endpoint) + c, err = cfg.CMEndpoint.ToClient(h, s) + if err != nil { + return nil, err + } + splunkEntClient[typeCm] = &splunkClient{ + client: c, + endpoint: e, + } } - return &splunkEntClient{ - client: client, - endpoints: endpoints, - }, nil + return &splunkEntClient, nil } // For running ad hoc searches only @@ -70,7 +97,7 @@ func (c *splunkEntClient) createRequest(ctx context.Context, sr *searchResponse) // this returns a jobid which is then used in the second part to retrieve the search results if sr.Jobid == nil { path := "/services/search/jobs/" - url, _ := url.JoinPath(c.endpoints[eptType].String(), path) + url, _ := url.JoinPath((*c)[eptType].endpoint.String(), path) // reader for the response data data := strings.NewReader(sr.search) @@ -84,7 +111,7 @@ func (c *splunkEntClient) createRequest(ctx context.Context, sr *searchResponse) return req, nil } path := fmt.Sprintf("/services/search/jobs/%s/results", 
*sr.Jobid) - url, _ := url.JoinPath(c.endpoints[eptType].String(), path) + url, _ := url.JoinPath((*c)[eptType].endpoint.String(), path) req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) if err != nil { @@ -94,6 +121,7 @@ func (c *splunkEntClient) createRequest(ctx context.Context, sr *searchResponse) return req, nil } +// forms an *http.Request for use with Splunk built-in API's (like introspection). func (c *splunkEntClient) createAPIRequest(ctx context.Context, apiEndpoint string) (*http.Request, error) { // get endpoint type from the context eptType := ctx.Value(endpointType("type")) @@ -101,7 +129,7 @@ func (c *splunkEntClient) createAPIRequest(ctx context.Context, apiEndpoint stri return nil, errCtxMissingEndpointType } - url := c.endpoints[eptType].String() + apiEndpoint + url := (*c)[eptType].endpoint.String() + apiEndpoint req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) if err != nil { @@ -111,13 +139,20 @@ func (c *splunkEntClient) createAPIRequest(ctx context.Context, apiEndpoint stri return req, nil } -// Construct and perform a request to the API. Returns the searchResponse passed into the -// function as state +// Perform a request. 
func (c *splunkEntClient) makeRequest(req *http.Request) (*http.Response, error) { - res, err := c.client.Do(req) - if err != nil { - return nil, err + // get endpoint type from the context + eptType := req.Context().Value(endpointType("type")) + if eptType == nil { + return nil, errCtxMissingEndpointType + } + if sc, ok := (*c)[eptType]; ok { + res, err := sc.client.Do(req) + if err != nil { + return nil, err + } + return res, nil + } else { + return nil, errEndpointTypeNotFound } - - return res, nil } diff --git a/receiver/splunkenterprisereceiver/config.go b/receiver/splunkenterprisereceiver/config.go index 996906d29a75d..0f7e359e24a96 100644 --- a/receiver/splunkenterprisereceiver/config.go +++ b/receiver/splunkenterprisereceiver/config.go @@ -23,12 +23,11 @@ var ( ) type Config struct { - confighttp.ClientConfig `mapstructure:",squash"` scraperhelper.ScraperControllerSettings `mapstructure:",squash"` metadata.MetricsBuilderConfig `mapstructure:",squash"` - IdxEndpoint string `mapstructure:"idx_endpoint"` - SHEndpoint string `mapstructure:"sh_endpoint"` - CMEndpoint string `mapstructure:"cm_endpoint"` + IdxEndpoint confighttp.HTTPClientSettings `mapstructure:"indexer"` + SHEndpoint confighttp.HTTPClientSettings `mapstructure:"search_head"` + CMEndpoint confighttp.HTTPClientSettings `mapstructure:"cluster_master"` } func (cfg *Config) Validate() (errors error) { @@ -36,19 +35,29 @@ func (cfg *Config) Validate() (errors error) { var err error endpoints := []string{} - if cfg.Endpoint != "" { - errors = multierr.Append(errors, errUnspecifiedEndpoint) - } else if cfg.IdxEndpoint == "" && cfg.SHEndpoint == "" && cfg.CMEndpoint == "" { + // if no endpoint is set we do not start the receiver. For each set endpoint we go through and Validate + // that it contains an auth setting and a valid endpoint, if its missing either of these the receiver will + // fail to start. 
+ if cfg.IdxEndpoint.Endpoint == "" && cfg.SHEndpoint.Endpoint == "" && cfg.CMEndpoint.Endpoint == "" { errors = multierr.Append(errors, errBadOrMissingEndpoint) } else { - if cfg.IdxEndpoint != "" { - endpoints = append(endpoints, cfg.IdxEndpoint) + if cfg.IdxEndpoint.Endpoint != "" { + if cfg.IdxEndpoint.Auth.AuthenticatorID.Name() == "" { + errors = multierr.Append(errors, errMissingAuthExtension) + } + endpoints = append(endpoints, cfg.IdxEndpoint.Endpoint) } - if cfg.SHEndpoint != "" { - endpoints = append(endpoints, cfg.SHEndpoint) + if cfg.SHEndpoint.Endpoint != "" { + if cfg.SHEndpoint.Auth.AuthenticatorID.Name() == "" { + errors = multierr.Append(errors, errMissingAuthExtension) + } + endpoints = append(endpoints, cfg.SHEndpoint.Endpoint) } - if cfg.CMEndpoint != "" { - endpoints = append(endpoints, cfg.CMEndpoint) + if cfg.CMEndpoint.Endpoint != "" { + if cfg.CMEndpoint.Auth.AuthenticatorID.Name() == "" { + errors = multierr.Append(errors, errMissingAuthExtension) + } + endpoints = append(endpoints, cfg.CMEndpoint.Endpoint) } for _, e := range endpoints { @@ -65,9 +74,12 @@ func (cfg *Config) Validate() (errors error) { } } +<<<<<<< HEAD if cfg.ClientConfig.Auth.AuthenticatorID.Name() == "" { errors = multierr.Append(errors, errMissingAuthExtension) } +======= +>>>>>>> 3a6a3c2ce2 (refactored client) return errors } From a7d58a6b4b14f68c87e56541ef658e93605b655a Mon Sep 17 00:00:00 2001 From: shalper2 Date: Mon, 22 Jan 2024 15:30:33 -0600 Subject: [PATCH 03/31] fixed tests --- receiver/splunkenterprisereceiver/client.go | 35 +++++++++++---- .../splunkenterprisereceiver/client_test.go | 24 +++++----- receiver/splunkenterprisereceiver/config.go | 6 +-- .../splunkenterprisereceiver/config_test.go | 44 +++++++++---------- receiver/splunkenterprisereceiver/factory.go | 5 ++- .../splunkenterprisereceiver/factory_test.go | 8 +++- receiver/splunkenterprisereceiver/scraper.go | 4 +- .../splunkenterprisereceiver/scraper_test.go | 20 ++++++--- 8 files changed, 90 
insertions(+), 56 deletions(-) diff --git a/receiver/splunkenterprisereceiver/client.go b/receiver/splunkenterprisereceiver/client.go index ec37ecbf60e86..a966cbaa36982 100644 --- a/receiver/splunkenterprisereceiver/client.go +++ b/receiver/splunkenterprisereceiver/client.go @@ -24,6 +24,7 @@ const ( var ( errCtxMissingEndpointType = errors.New("context was passed without the endpoint type included") errEndpointTypeNotFound = errors.New("requested client is not configured and could not be found in splunkEntClient") + errNoClientFound = errors.New("no client corresponding to the endpoint type was found") ) // Type wrapper for accessing context value @@ -40,7 +41,7 @@ type splunkClient struct { endpoint *url.URL } -func newSplunkEntClient(ctx context.Context, cfg *Config, h component.Host, s component.TelemetrySettings) (*splunkEntClient, error) { +func newSplunkEntClient(cfg *Config, h component.Host, s component.TelemetrySettings) (*splunkEntClient, error) { var err error var e *url.URL var c *http.Client @@ -86,7 +87,7 @@ func newSplunkEntClient(ctx context.Context, cfg *Config, h component.Host, s co } // For running ad hoc searches only -func (c *splunkEntClient) createRequest(ctx context.Context, sr *searchResponse) (*http.Request, error) { +func (c *splunkEntClient) createRequest(ctx context.Context, sr *searchResponse) (req *http.Request, err error) { // get endpoint type from the context eptType := ctx.Value(endpointType("type")) if eptType == nil { @@ -96,14 +97,22 @@ func (c *splunkEntClient) createRequest(ctx context.Context, sr *searchResponse) // Running searches via Splunk's REST API is a two step process: First you submit the job to run // this returns a jobid which is then used in the second part to retrieve the search results if sr.Jobid == nil { + var u string path := "/services/search/jobs/" - url, _ := url.JoinPath((*c)[eptType].endpoint.String(), path) + if e, ok := (*c)[eptType]; !ok { + return nil, errNoClientFound + } else { + u, err = 
url.JoinPath(e.endpoint.String(), path) + if err != nil { + return nil, err + } + } // reader for the response data data := strings.NewReader(sr.search) // return the build request, ready to be run by makeRequest - req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, data) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, u, data) if err != nil { return nil, err } @@ -113,7 +122,7 @@ func (c *splunkEntClient) createRequest(ctx context.Context, sr *searchResponse) path := fmt.Sprintf("/services/search/jobs/%s/results", *sr.Jobid) url, _ := url.JoinPath((*c)[eptType].endpoint.String(), path) - req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + req, err = http.NewRequestWithContext(ctx, http.MethodGet, url, nil) if err != nil { return nil, err } @@ -122,16 +131,24 @@ func (c *splunkEntClient) createRequest(ctx context.Context, sr *searchResponse) } // forms an *http.Request for use with Splunk built-in API's (like introspection). -func (c *splunkEntClient) createAPIRequest(ctx context.Context, apiEndpoint string) (*http.Request, error) { +func (c *splunkEntClient) createAPIRequest(ctx context.Context, apiEndpoint string) (req *http.Request, err error) { + var u string + // get endpoint type from the context eptType := ctx.Value(endpointType("type")) if eptType == nil { return nil, errCtxMissingEndpointType } - url := (*c)[eptType].endpoint.String() + apiEndpoint - - req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if e, ok := (*c)[eptType]; !ok { + return nil, errNoClientFound + } else { + u, err = url.JoinPath(e.endpoint.String(), apiEndpoint) + if err != nil { + return nil, err + } + } + req, err = http.NewRequestWithContext(ctx, http.MethodGet, u, nil) if err != nil { return nil, err } diff --git a/receiver/splunkenterprisereceiver/client_test.go b/receiver/splunkenterprisereceiver/client_test.go index 83ea4caf1bb48..7a4a7579f7be7 100644 --- a/receiver/splunkenterprisereceiver/client_test.go +++ 
b/receiver/splunkenterprisereceiver/client_test.go @@ -34,11 +34,9 @@ func (m *mockHost) GetExtensions() map[component.ID]component.Component { func TestClientCreation(t *testing.T) { cfg := &Config{ - ClientConfig: confighttp.ClientConfig{ + IdxEndpoint: confighttp.HTTPClientSettings{ Endpoint: "https://localhost:8089", - Auth: &configauth.Authentication{ - AuthenticatorID: component.MustNewIDWithName("basicauth", "client"), - }, + Auth: &configauth.Authentication{AuthenticatorID: component.NewID("basicauth/client")}, }, ScraperControllerSettings: scraperhelper.ScraperControllerSettings{ CollectionInterval: 10 * time.Second, @@ -58,18 +56,22 @@ func TestClientCreation(t *testing.T) { testEndpoint, _ := url.Parse("https://localhost:8089") - require.Equal(t, client.endpoint, testEndpoint) + require.Equal(t, testEndpoint, (*client)[typeIdx].endpoint) } // test functionality of createRequest which is used for building metrics out of // ad-hoc searches func TestClientCreateRequest(t *testing.T) { cfg := &Config{ - ClientConfig: confighttp.ClientConfig{ + IdxEndpoint: confighttp.HTTPClientSettings{ Endpoint: "https://localhost:8089", +<<<<<<< HEAD Auth: &configauth.Authentication{ AuthenticatorID: component.MustNewIDWithName("basicauth", "client"), }, +======= + Auth: &configauth.Authentication{AuthenticatorID: component.NewID("basicauth/client")}, +>>>>>>> 49e204e471 (fixed tests) }, ScraperControllerSettings: scraperhelper.ScraperControllerSettings{ CollectionInterval: 10 * time.Second, @@ -131,6 +133,7 @@ func TestClientCreateRequest(t *testing.T) { } ctx := context.Background() + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) for _, test := range tests { t.Run(test.desc, func(t *testing.T) { req, err := test.client.createRequest(ctx, test.sr) @@ -147,11 +150,9 @@ func TestClientCreateRequest(t *testing.T) { // createAPIRequest creates a request for api calls i.e. 
to introspection endpoint func TestAPIRequestCreate(t *testing.T) { cfg := &Config{ - ClientConfig: confighttp.ClientConfig{ + IdxEndpoint: confighttp.HTTPClientSettings{ Endpoint: "https://localhost:8089", - Auth: &configauth.Authentication{ - AuthenticatorID: component.MustNewIDWithName("basicauth", "client"), - }, + Auth: &configauth.Authentication{AuthenticatorID: component.NewID("basicauth/client")}, }, ScraperControllerSettings: scraperhelper.ScraperControllerSettings{ CollectionInterval: 10 * time.Second, @@ -171,11 +172,12 @@ func TestAPIRequestCreate(t *testing.T) { require.NoError(t, err) ctx := context.Background() + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) req, err := client.createAPIRequest(ctx, "/test/endpoint") require.NoError(t, err) // build the expected request - expectedURL := client.endpoint.String() + "/test/endpoint" + expectedURL := (*client)[typeIdx].endpoint.String() + "/test/endpoint" expected, _ := http.NewRequest(http.MethodGet, expectedURL, nil) require.Equal(t, expected.URL, req.URL) diff --git a/receiver/splunkenterprisereceiver/config.go b/receiver/splunkenterprisereceiver/config.go index 0f7e359e24a96..92d86d757c17e 100644 --- a/receiver/splunkenterprisereceiver/config.go +++ b/receiver/splunkenterprisereceiver/config.go @@ -42,19 +42,19 @@ func (cfg *Config) Validate() (errors error) { errors = multierr.Append(errors, errBadOrMissingEndpoint) } else { if cfg.IdxEndpoint.Endpoint != "" { - if cfg.IdxEndpoint.Auth.AuthenticatorID.Name() == "" { + if cfg.IdxEndpoint.Auth == nil { errors = multierr.Append(errors, errMissingAuthExtension) } endpoints = append(endpoints, cfg.IdxEndpoint.Endpoint) } if cfg.SHEndpoint.Endpoint != "" { - if cfg.SHEndpoint.Auth.AuthenticatorID.Name() == "" { + if cfg.SHEndpoint.Auth == nil { errors = multierr.Append(errors, errMissingAuthExtension) } endpoints = append(endpoints, cfg.SHEndpoint.Endpoint) } if cfg.CMEndpoint.Endpoint != "" { - if cfg.CMEndpoint.Auth.AuthenticatorID.Name() 
== "" { + if cfg.CMEndpoint.Auth == nil { errors = multierr.Append(errors, errMissingAuthExtension) } endpoints = append(endpoints, cfg.CMEndpoint.Endpoint) diff --git a/receiver/splunkenterprisereceiver/config_test.go b/receiver/splunkenterprisereceiver/config_test.go index 9ec2adcb8a0bc..a351e91f730e3 100644 --- a/receiver/splunkenterprisereceiver/config_test.go +++ b/receiver/splunkenterprisereceiver/config_test.go @@ -37,19 +37,13 @@ func TestEndpointCorrectness(t *testing.T) { errBad = multierr.Append(errBad, errMissingAuthExtension) // Errors related to setting the wrong endpoint field (i.e. the one from httpconfig) - errMisconf = multierr.Append(errMisconf, errUnspecifiedEndpoint) errMisconf = multierr.Append(errMisconf, errMissingAuthExtension) + errMisconf = multierr.Append(errMisconf, errUnspecifiedEndpoint) // Error related to bad scheme (not http/s) errScheme = multierr.Append(errScheme, errBadScheme) errScheme = multierr.Append(errScheme, errMissingAuthExtension) - httpCfg := confighttp.NewDefaultHTTPClientSettings() - httpCfg.Auth = &configauth.Authentication{AuthenticatorID: component.NewID("dummy")} - httpCfgWithEndpoint := httpCfg - httpCfgWithEndpoint.Endpoint = "https://123.123.32.2:2093" - httpCfgWithEndpoint.Auth = &configauth.Authentication{AuthenticatorID: component.NewID("dummy")} - tests := []struct { desc string expected error @@ -59,38 +53,44 @@ func TestEndpointCorrectness(t *testing.T) { desc: "missing any endpoint setting", expected: errBad, config: &Config{ - HTTPClientSettings: httpCfg, - }, - }, - { - desc: "configured the wrong endpoint field (httpconfig.Endpoint)", - expected: errMisconf, - config: &Config{ - HTTPClientSettings: httpCfgWithEndpoint, + IdxEndpoint: confighttp.HTTPClientSettings{ + Auth: &configauth.Authentication{AuthenticatorID: component.NewID("dummy")}, + }, + SHEndpoint: confighttp.HTTPClientSettings{ + Auth: &configauth.Authentication{AuthenticatorID: component.NewID("dummy")}, + }, + CMEndpoint: 
confighttp.HTTPClientSettings{ + Auth: &configauth.Authentication{AuthenticatorID: component.NewID("dummy")}, + }, }, }, { desc: "properly configured invalid endpoint", expected: errBad, config: &Config{ - HTTPClientSettings: httpCfg, - IdxEndpoint: "123.12.23.43:80", + IdxEndpoint: confighttp.HTTPClientSettings{ + Auth: &configauth.Authentication{AuthenticatorID: component.NewID("dummy")}, + Endpoint: "123.321.12.1:1", + }, }, }, { desc: "properly configured endpoint has bad scheme", expected: errScheme, config: &Config{ - HTTPClientSettings: httpCfg, - IdxEndpoint: "gss://123.124.32.12:90", + IdxEndpoint: confighttp.HTTPClientSettings{ + Auth: &configauth.Authentication{AuthenticatorID: component.NewID("dummy")}, + Endpoint: "gss://123.124.32.12:90", + }, }, }, { - desc: "properly configured endpoint", + desc: "properly configured endpoint missing auth", expected: errMissingAuthExtension, config: &Config{ - HTTPClientSettings: httpCfg, - IdxEndpoint: "https://123.123.32.2:2093", + IdxEndpoint: confighttp.HTTPClientSettings{ + Endpoint: "https://123.123.32.2:2093", + }, }, }, } diff --git a/receiver/splunkenterprisereceiver/factory.go b/receiver/splunkenterprisereceiver/factory.go index 7c9a021730e3c..d0b9343c63a14 100644 --- a/receiver/splunkenterprisereceiver/factory.go +++ b/receiver/splunkenterprisereceiver/factory.go @@ -28,6 +28,7 @@ func createDefaultConfig() component.Config { httpCfg.Headers = map[string]configopaque.String{ "Content-Type": "application/x-www-form-urlencoded", } + httpCfg.Timeout = defaultMaxSearchWaitTime // Default ScraperController settings scfg := scraperhelper.NewDefaultScraperControllerSettings(metadata.Type) @@ -35,7 +36,9 @@ func createDefaultConfig() component.Config { scfg.Timeout = defaultMaxSearchWaitTime return &Config{ - ClientConfig: httpCfg, + IdxEndpoint: httpCfg, + SHEndpoint: httpCfg, + CMEndpoint: httpCfg, ScraperControllerSettings: scfg, MetricsBuilderConfig: metadata.DefaultMetricsBuilderConfig(), } diff --git 
a/receiver/splunkenterprisereceiver/factory_test.go b/receiver/splunkenterprisereceiver/factory_test.go index 17031d573961a..cb8617917b3c7 100644 --- a/receiver/splunkenterprisereceiver/factory_test.go +++ b/receiver/splunkenterprisereceiver/factory_test.go @@ -28,9 +28,12 @@ func TestDefaultConfig(t *testing.T) { cfg.Headers = map[string]configopaque.String{ "Content-Type": "application/x-www-form-urlencoded", } + cfg.Timeout = 60 * time.Second expectedConf := &Config{ - ClientConfig: cfg, + IdxEndpoint: cfg, + SHEndpoint: cfg, + CMEndpoint: cfg, ScraperControllerSettings: scraperhelper.ScraperControllerSettings{ CollectionInterval: 10 * time.Minute, InitialDelay: 1 * time.Second, @@ -55,6 +58,9 @@ func TestCreateMetricsReceiver(t *testing.T) { t.Parallel() cfg := createDefaultConfig().(*Config) + cfg.CMEndpoint.Endpoint = "https://123.12.12.12:80" + cfg.IdxEndpoint.Endpoint = "https://123.12.12.12:80" + cfg.SHEndpoint.Endpoint = "https://123.12.12.12:80" _, err := createMetricsReceiver( context.Background(), diff --git a/receiver/splunkenterprisereceiver/scraper.go b/receiver/splunkenterprisereceiver/scraper.go index c9a5d8647e9b0..0753c6a89ef5f 100644 --- a/receiver/splunkenterprisereceiver/scraper.go +++ b/receiver/splunkenterprisereceiver/scraper.go @@ -43,8 +43,8 @@ func newSplunkMetricsScraper(params receiver.CreateSettings, cfg *Config) splunk } // Create a client instance and add to the splunkScraper -func (s *splunkScraper) start(ctx context.Context, h component.Host) (err error) { - client, err := newSplunkEntClient(ctx, s.conf, h, s.settings) +func (s *splunkScraper) start(_ context.Context, h component.Host) (err error) { + client, err := newSplunkEntClient(s.conf, h, s.settings) if err != nil { return err } diff --git a/receiver/splunkenterprisereceiver/scraper_test.go b/receiver/splunkenterprisereceiver/scraper_test.go index c0ae5d7c624a2..f6001724f4ad5 100644 --- a/receiver/splunkenterprisereceiver/scraper_test.go +++ 
b/receiver/splunkenterprisereceiver/scraper_test.go @@ -52,11 +52,11 @@ func mockIntrospectionQueues(w http.ResponseWriter, _ *http.Request) { func createMockServer() *httptest.Server { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch strings.TrimSpace(r.URL.Path) { - case "/services/server/introspection/indexer": + case "/services/server/introspection/indexer?output_mode=json": mockIndexerThroughput(w, r) - case "/services/data/indexes-extended": + case "/services/data/indexes-extended?output_mode=json&count=-1": mockIndexesExtended(w, r) - case "/services/server/introspection/queues": + case "/services/server/introspection/queues?output_mode=json&count=-1": mockIntrospectionQueues(w, r) default: http.NotFoundHandler().ServeHTTP(w, r) @@ -84,11 +84,17 @@ func TestScraper(t *testing.T) { metricsettings.Metrics.SplunkServerIntrospectionQueuesCurrentBytes.Enabled = true cfg := &Config{ - ClientConfig: confighttp.ClientConfig{ + IdxEndpoint: confighttp.HTTPClientSettings{ Endpoint: ts.URL, - Auth: &configauth.Authentication{ - AuthenticatorID: component.MustNewIDWithName("basicauth", "client"), - }, + Auth: &configauth.Authentication{AuthenticatorID: component.NewID("basicauth/client")}, + }, + SHEndpoint: confighttp.HTTPClientSettings{ + Endpoint: ts.URL, + Auth: &configauth.Authentication{AuthenticatorID: component.NewID("basicauth/client")}, + }, + CMEndpoint: confighttp.HTTPClientSettings{ + Endpoint: ts.URL, + Auth: &configauth.Authentication{AuthenticatorID: component.NewID("basicauth/client")}, }, ScraperControllerSettings: scraperhelper.ScraperControllerSettings{ CollectionInterval: 10 * time.Second, From 1b7051fde3dadaaf39c5551ebf5cdbbea1dc2245 Mon Sep 17 00:00:00 2001 From: shalper2 Date: Mon, 22 Jan 2024 15:48:25 -0600 Subject: [PATCH 04/31] added chloggen --- .chloggen/splunkent-update-conf.yaml | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100755 
.chloggen/splunkent-update-conf.yaml diff --git a/.chloggen/splunkent-update-conf.yaml b/.chloggen/splunkent-update-conf.yaml new file mode 100755 index 0000000000000..8fb017005b6fd --- /dev/null +++ b/.chloggen/splunkent-update-conf.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: 'enhancement' + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: splunkentreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: "Updated the config.go and propagated these changes to other receiver components. Change was necessary to differentiate different configurable endpoints." + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [30254] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API.
+# Default: '[user]' +change_logs: [user] From b2e181dd191917fee0dd5b85ab44a8e6a1cb66fd Mon Sep 17 00:00:00 2001 From: shalper2 Date: Wed, 24 Jan 2024 13:57:58 -0600 Subject: [PATCH 05/31] updated example confs --- receiver/splunkenterprisereceiver/README.md | 17 +++++++++++++++-- .../testdata/config.yaml | 18 ++++++++++++------ 2 files changed, 27 insertions(+), 8 deletions(-) diff --git a/receiver/splunkenterprisereceiver/README.md b/receiver/splunkenterprisereceiver/README.md index 7b74db84487ed..d194d1d7cfeb0 100644 --- a/receiver/splunkenterprisereceiver/README.md +++ b/receiver/splunkenterprisereceiver/README.md @@ -35,13 +35,26 @@ extensions: receivers: splunkenterprise: indexer: - auth: basicauth/indexer + auth: + authenticator: basicauth/indexer endpoint: "https://localhost:8089" timeout: 45s cluster_master: - auth: basicauth/cluster_master + auth: + authenticator: basicauth/cluster_master endpoint: "https://localhost:8089" timeout: 45s + +exporters: + logging: + loglevel: info + +service: + extensions: [basicauth/indexer, basicauth/cluster_master] + pipelines: + metrics: + receivers: [splunkenterprise] + exporters: [logging] ``` For a full list of settings exposed by this receiver please look [here](./config.go) with a detailed configuration [here](./testdata/config.yaml). diff --git a/receiver/splunkenterprisereceiver/testdata/config.yaml b/receiver/splunkenterprisereceiver/testdata/config.yaml index c35188fd18481..1c3bb2455d1b1 100644 --- a/receiver/splunkenterprisereceiver/testdata/config.yaml +++ b/receiver/splunkenterprisereceiver/testdata/config.yaml @@ -1,15 +1,21 @@ # Example config for the Splunk Enterprise Receiver. -basicauth/client: +basicauth/search_head: client_auth: username: admin password: securityFirst +basicauth/indexer: + client_auth: + username: admin + password: securityFirst1! 
splunkenterprise: - # required settings - auth: basicauth/client # must use basicauthextension - idx_endpoint: "https://localhost:8089" # Optional settings - collection_interval: 10s - timeout: 11s + indexer: + auth: + authenticator: basicauth/indexer + timeout: 10 + search_head: + auth: + authenticator: basicauth/search_head # Also optional: metric settings metrics: splunk.license.index.usage: From ca99caced4ce556817aed5d4066012681df9152f Mon Sep 17 00:00:00 2001 From: shalper2 Date: Tue, 30 Jan 2024 10:16:04 -0600 Subject: [PATCH 06/31] added check for unconfigured endpoint in scrape --- receiver/splunkenterprisereceiver/README.md | 2 +- receiver/splunkenterprisereceiver/client.go | 7 ++ receiver/splunkenterprisereceiver/scraper.go | 78 ++++++++++---------- 3 files changed, 47 insertions(+), 40 deletions(-) diff --git a/receiver/splunkenterprisereceiver/README.md b/receiver/splunkenterprisereceiver/README.md index d194d1d7cfeb0..ad36fde47cea0 100644 --- a/receiver/splunkenterprisereceiver/README.md +++ b/receiver/splunkenterprisereceiver/README.md @@ -8,7 +8,7 @@ jobs. ## Configuration -The following settings are required, omitting them will either cause your receiver to fail to compile or result in 4/5xx return codes during scraping. These must be set for each Splunk instance type (indexer, search head, or cluster master) from which you wish to pull metrics. At present, only one of each type is accepted, which if done properly should allow for deployment wide metrics to be gathered. +The following settings are required, omitting them will either cause your receiver to fail to compile or result in 4/5xx return codes during scraping. These must be set for each Splunk instance type (indexer, search head, or cluster master) from which you wish to pull metrics. At present, only one of each type is accepted, per configured receiver instance. 
* `basicauth` (from [basicauthextension](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/extension/basicauthextension)): A configured stanza for the basicauthextension. * `auth` (no default): String name referencing your auth extension. diff --git a/receiver/splunkenterprisereceiver/client.go b/receiver/splunkenterprisereceiver/client.go index a966cbaa36982..37eaa19627bf0 100644 --- a/receiver/splunkenterprisereceiver/client.go +++ b/receiver/splunkenterprisereceiver/client.go @@ -173,3 +173,10 @@ func (c *splunkEntClient) makeRequest(req *http.Request) (*http.Response, error) return nil, errEndpointTypeNotFound } } + +// Check if the splunkEntClient contains a configured endpoint for the type of scraper +// Returns true if an entry exists, false if not. +func (c *splunkEntClient) isConfigured(v string) bool { + _, ok := (*c)[v] + return ok +} diff --git a/receiver/splunkenterprisereceiver/scraper.go b/receiver/splunkenterprisereceiver/scraper.go index 0753c6a89ef5f..06cbce8594945 100644 --- a/receiver/splunkenterprisereceiver/scraper.go +++ b/receiver/splunkenterprisereceiver/scraper.go @@ -82,13 +82,13 @@ func (s *splunkScraper) scrape(ctx context.Context) (pmetric.Metrics, error) { // Each metric has its own scrape function associated with it func (s *splunkScraper) scrapeLicenseUsageByIndex(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkLicenseIndexUsage.Enabled || !s.splunkClient.isConfigured(typeSh) { + return + } ctx = context.WithValue(ctx, endpointType("type"), typeSh) var sr searchResponse // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding - if !s.conf.MetricsBuilderConfig.Metrics.SplunkLicenseIndexUsage.Enabled { - return - } sr = searchResponse{ search: searchDict[`SplunkLicenseIndexUsageSearch`], @@ -1042,14 +1042,14 @@ func unmarshallSearchReq(res *http.Response, sr 
*searchResponse) error { // Scrape index throughput introspection endpoint func (s *splunkScraper) scrapeIndexThroughput(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkIndexerThroughput.Enabled || !s.splunkClient.isConfigured(typeIdx) { + return + } + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it indexThroughput var ept string - if !s.conf.MetricsBuilderConfig.Metrics.SplunkIndexerThroughput.Enabled { - return - } - ept = apiDict[`SplunkIndexerThroughput`] req, err := s.splunkClient.createAPIRequest(ctx, ept) @@ -1084,14 +1084,14 @@ func (s *splunkScraper) scrapeIndexThroughput(ctx context.Context, now pcommon.T // Scrape indexes extended total size func (s *splunkScraper) scrapeIndexesTotalSize(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedTotalSize.Enabled || !s.splunkClient.isConfigured(typeIdx) { + return + } + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IndexesExtended var ept string - if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedTotalSize.Enabled { - return - } - ept = apiDict[`SplunkDataIndexesExtended`] req, err := s.splunkClient.createAPIRequest(ctx, ept) @@ -1139,14 +1139,14 @@ func (s *splunkScraper) scrapeIndexesTotalSize(ctx context.Context, now pcommon. 
// Scrape indexes extended total event count func (s *splunkScraper) scrapeIndexesEventCount(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedEventCount.Enabled || !s.splunkClient.isConfigured(typeIdx) { + return + } + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IndexesExtended var ept string - if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedEventCount.Enabled { - return - } - ept = apiDict[`SplunkDataIndexesExtended`] req, err := s.splunkClient.createAPIRequest(ctx, ept) @@ -1187,14 +1187,14 @@ func (s *splunkScraper) scrapeIndexesEventCount(ctx context.Context, now pcommon // Scrape indexes extended total bucket count func (s *splunkScraper) scrapeIndexesBucketCount(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedBucketCount.Enabled || !s.splunkClient.isConfigured(typeIdx) { + return + } + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IndexesExtended var ept string - if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedBucketCount.Enabled { - return - } - ept = apiDict[`SplunkDataIndexesExtended`] req, err := s.splunkClient.createAPIRequest(ctx, ept) @@ -1241,14 +1241,14 @@ func (s *splunkScraper) scrapeIndexesBucketCount(ctx context.Context, now pcommo // Scrape indexes extended raw size func (s *splunkScraper) scrapeIndexesRawSize(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedRawSize.Enabled || !s.splunkClient.isConfigured(typeIdx) { + return + } + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IndexesExtended var ept string - if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedRawSize.Enabled { - return - } - ept = apiDict[`SplunkDataIndexesExtended`] req, err := 
s.splunkClient.createAPIRequest(ctx, ept) @@ -1295,14 +1295,14 @@ func (s *splunkScraper) scrapeIndexesRawSize(ctx context.Context, now pcommon.Ti // Scrape indexes extended bucket event count func (s *splunkScraper) scrapeIndexesBucketEventCount(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedBucketEventCount.Enabled || !s.splunkClient.isConfigured(typeIdx) { + return + } + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IndexesExtended var ept string - if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedBucketEventCount.Enabled { - return - } - ept = apiDict[`SplunkDataIndexesExtended`] req, err := s.splunkClient.createAPIRequest(ctx, ept) @@ -1366,14 +1366,14 @@ func (s *splunkScraper) scrapeIndexesBucketEventCount(ctx context.Context, now p // Scrape indexes extended bucket hot/warm count func (s *splunkScraper) scrapeIndexesBucketHotWarmCount(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedBucketHotCount.Enabled || !s.splunkClient.isConfigured(typeIdx) { + return + } + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IndexesExtended var ept string - if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedBucketHotCount.Enabled { - return - } - ept = apiDict[`SplunkDataIndexesExtended`] req, err := s.splunkClient.createAPIRequest(ctx, ept) @@ -1430,14 +1430,14 @@ func (s *splunkScraper) scrapeIndexesBucketHotWarmCount(ctx context.Context, now // Scrape introspection queues func (s *splunkScraper) scrapeIntrospectionQueues(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkServerIntrospectionQueuesCurrent.Enabled || !s.splunkClient.isConfigured(typeIdx) { + return + } + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it 
IntrospectionQueues var ept string - if !s.conf.MetricsBuilderConfig.Metrics.SplunkServerIntrospectionQueuesCurrent.Enabled { - return - } - ept = apiDict[`SplunkIntrospectionQueues`] req, err := s.splunkClient.createAPIRequest(ctx, ept) @@ -1479,14 +1479,14 @@ func (s *splunkScraper) scrapeIntrospectionQueues(ctx context.Context, now pcomm // Scrape introspection queues bytes func (s *splunkScraper) scrapeIntrospectionQueuesBytes(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkServerIntrospectionQueuesCurrentBytes.Enabled || !s.splunkClient.isConfigured(typeIdx) { + return + } + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IntrospectionQueues var ept string - if !s.conf.MetricsBuilderConfig.Metrics.SplunkServerIntrospectionQueuesCurrentBytes.Enabled { - return - } - ept = apiDict[`SplunkIntrospectionQueues`] req, err := s.splunkClient.createAPIRequest(ctx, ept) From f564d0e6324fcae895ff2d018fd1546e0be91b40 Mon Sep 17 00:00:00 2001 From: shalper2 Date: Tue, 30 Jan 2024 11:43:58 -0600 Subject: [PATCH 07/31] fixed types for scrapes --- receiver/splunkenterprisereceiver/scraper.go | 40 ++++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/receiver/splunkenterprisereceiver/scraper.go b/receiver/splunkenterprisereceiver/scraper.go index 06cbce8594945..3e4f29a99ed46 100644 --- a/receiver/splunkenterprisereceiver/scraper.go +++ b/receiver/splunkenterprisereceiver/scraper.go @@ -82,10 +82,10 @@ func (s *splunkScraper) scrape(ctx context.Context) (pmetric.Metrics, error) { // Each metric has its own scrape function associated with it func (s *splunkScraper) scrapeLicenseUsageByIndex(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - if !s.conf.MetricsBuilderConfig.Metrics.SplunkLicenseIndexUsage.Enabled || !s.splunkClient.isConfigured(typeSh) { + if 
!s.conf.MetricsBuilderConfig.Metrics.SplunkLicenseIndexUsage.Enabled || !s.splunkClient.isConfigured(typeCm) { return } - ctx = context.WithValue(ctx, endpointType("type"), typeSh) + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var sr searchResponse // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding @@ -1042,11 +1042,11 @@ func unmarshallSearchReq(res *http.Response, sr *searchResponse) error { // Scrape index throughput introspection endpoint func (s *splunkScraper) scrapeIndexThroughput(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - if !s.conf.MetricsBuilderConfig.Metrics.SplunkIndexerThroughput.Enabled || !s.splunkClient.isConfigured(typeIdx) { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkIndexerThroughput.Enabled || !s.splunkClient.isConfigured(typeCm) { return } - ctx = context.WithValue(ctx, endpointType("type"), typeIdx) + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var it indexThroughput var ept string @@ -1084,11 +1084,11 @@ func (s *splunkScraper) scrapeIndexThroughput(ctx context.Context, now pcommon.T // Scrape indexes extended total size func (s *splunkScraper) scrapeIndexesTotalSize(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedTotalSize.Enabled || !s.splunkClient.isConfigured(typeIdx) { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedTotalSize.Enabled || !s.splunkClient.isConfigured(typeCm) { return } - ctx = context.WithValue(ctx, endpointType("type"), typeIdx) + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var it IndexesExtended var ept string @@ -1139,11 +1139,11 @@ func (s *splunkScraper) scrapeIndexesTotalSize(ctx context.Context, now pcommon. 
// Scrape indexes extended total event count func (s *splunkScraper) scrapeIndexesEventCount(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedEventCount.Enabled || !s.splunkClient.isConfigured(typeIdx) { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedEventCount.Enabled || !s.splunkClient.isConfigured(typeCm) { return } - ctx = context.WithValue(ctx, endpointType("type"), typeIdx) + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var it IndexesExtended var ept string @@ -1187,11 +1187,11 @@ func (s *splunkScraper) scrapeIndexesEventCount(ctx context.Context, now pcommon // Scrape indexes extended total bucket count func (s *splunkScraper) scrapeIndexesBucketCount(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedBucketCount.Enabled || !s.splunkClient.isConfigured(typeIdx) { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedBucketCount.Enabled || !s.splunkClient.isConfigured(typeCm) { return } - ctx = context.WithValue(ctx, endpointType("type"), typeIdx) + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var it IndexesExtended var ept string @@ -1241,11 +1241,11 @@ func (s *splunkScraper) scrapeIndexesBucketCount(ctx context.Context, now pcommo // Scrape indexes extended raw size func (s *splunkScraper) scrapeIndexesRawSize(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedRawSize.Enabled || !s.splunkClient.isConfigured(typeIdx) { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedRawSize.Enabled || !s.splunkClient.isConfigured(typeCm) { return } - ctx = context.WithValue(ctx, endpointType("type"), typeIdx) + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var it IndexesExtended var ept string @@ 
-1295,11 +1295,11 @@ func (s *splunkScraper) scrapeIndexesRawSize(ctx context.Context, now pcommon.Ti // Scrape indexes extended bucket event count func (s *splunkScraper) scrapeIndexesBucketEventCount(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedBucketEventCount.Enabled || !s.splunkClient.isConfigured(typeIdx) { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedBucketEventCount.Enabled || !s.splunkClient.isConfigured(typeCm) { return } - ctx = context.WithValue(ctx, endpointType("type"), typeIdx) + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var it IndexesExtended var ept string @@ -1366,11 +1366,11 @@ func (s *splunkScraper) scrapeIndexesBucketEventCount(ctx context.Context, now p // Scrape indexes extended bucket hot/warm count func (s *splunkScraper) scrapeIndexesBucketHotWarmCount(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedBucketHotCount.Enabled || !s.splunkClient.isConfigured(typeIdx) { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedBucketHotCount.Enabled || !s.splunkClient.isConfigured(typeCm) { return } - ctx = context.WithValue(ctx, endpointType("type"), typeIdx) + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var it IndexesExtended var ept string @@ -1430,11 +1430,11 @@ func (s *splunkScraper) scrapeIndexesBucketHotWarmCount(ctx context.Context, now // Scrape introspection queues func (s *splunkScraper) scrapeIntrospectionQueues(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - if !s.conf.MetricsBuilderConfig.Metrics.SplunkServerIntrospectionQueuesCurrent.Enabled || !s.splunkClient.isConfigured(typeIdx) { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkServerIntrospectionQueuesCurrent.Enabled || !s.splunkClient.isConfigured(typeCm) { return } - ctx = 
context.WithValue(ctx, endpointType("type"), typeIdx) + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var it IntrospectionQueues var ept string @@ -1479,11 +1479,11 @@ func (s *splunkScraper) scrapeIntrospectionQueues(ctx context.Context, now pcomm // Scrape introspection queues bytes func (s *splunkScraper) scrapeIntrospectionQueuesBytes(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - if !s.conf.MetricsBuilderConfig.Metrics.SplunkServerIntrospectionQueuesCurrentBytes.Enabled || !s.splunkClient.isConfigured(typeIdx) { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkServerIntrospectionQueuesCurrentBytes.Enabled || !s.splunkClient.isConfigured(typeCm) { return } - ctx = context.WithValue(ctx, endpointType("type"), typeIdx) + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var it IntrospectionQueues var ept string From 414915fee26cdd39e4588973b2cb12b08376dea6 Mon Sep 17 00:00:00 2001 From: shalper2 Date: Wed, 31 Jan 2024 09:32:37 -0600 Subject: [PATCH 08/31] fixed url issue --- receiver/splunkenterprisereceiver/client.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/receiver/splunkenterprisereceiver/client.go b/receiver/splunkenterprisereceiver/client.go index 37eaa19627bf0..171af752872ad 100644 --- a/receiver/splunkenterprisereceiver/client.go +++ b/receiver/splunkenterprisereceiver/client.go @@ -143,10 +143,7 @@ func (c *splunkEntClient) createAPIRequest(ctx context.Context, apiEndpoint stri if e, ok := (*c)[eptType]; !ok { return nil, errNoClientFound } else { - u, err = url.JoinPath(e.endpoint.String(), apiEndpoint) - if err != nil { - return nil, err - } + u = e.endpoint.String() + apiEndpoint } req, err = http.NewRequestWithContext(ctx, http.MethodGet, u, nil) if err != nil { From ea34ad8b2056042abc8ef97cad161af8fc83a495 Mon Sep 17 00:00:00 2001 From: shalper2 Date: Wed, 31 Jan 2024 09:41:48 -0600 Subject: [PATCH 09/31] metric endpoint types --- 
receiver/splunkenterprisereceiver/scraper.go | 36 ++++++++++---------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/receiver/splunkenterprisereceiver/scraper.go b/receiver/splunkenterprisereceiver/scraper.go index 3e4f29a99ed46..4d7d97292fb6d 100644 --- a/receiver/splunkenterprisereceiver/scraper.go +++ b/receiver/splunkenterprisereceiver/scraper.go @@ -1042,11 +1042,11 @@ func unmarshallSearchReq(res *http.Response, sr *searchResponse) error { // Scrape index throughput introspection endpoint func (s *splunkScraper) scrapeIndexThroughput(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - if !s.conf.MetricsBuilderConfig.Metrics.SplunkIndexerThroughput.Enabled || !s.splunkClient.isConfigured(typeCm) { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkIndexerThroughput.Enabled || !s.splunkClient.isConfigured(typeIdx) { return } - ctx = context.WithValue(ctx, endpointType("type"), typeCm) + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it indexThroughput var ept string @@ -1084,11 +1084,11 @@ func (s *splunkScraper) scrapeIndexThroughput(ctx context.Context, now pcommon.T // Scrape indexes extended total size func (s *splunkScraper) scrapeIndexesTotalSize(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedTotalSize.Enabled || !s.splunkClient.isConfigured(typeCm) { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedTotalSize.Enabled || !s.splunkClient.isConfigured(typeIdx) { return } - ctx = context.WithValue(ctx, endpointType("type"), typeCm) + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IndexesExtended var ept string @@ -1139,11 +1139,11 @@ func (s *splunkScraper) scrapeIndexesTotalSize(ctx context.Context, now pcommon. 
// Scrape indexes extended total event count func (s *splunkScraper) scrapeIndexesEventCount(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedEventCount.Enabled || !s.splunkClient.isConfigured(typeCm) { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedEventCount.Enabled || !s.splunkClient.isConfigured(typeIdx) { return } - ctx = context.WithValue(ctx, endpointType("type"), typeCm) + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IndexesExtended var ept string @@ -1187,11 +1187,11 @@ func (s *splunkScraper) scrapeIndexesEventCount(ctx context.Context, now pcommon // Scrape indexes extended total bucket count func (s *splunkScraper) scrapeIndexesBucketCount(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedBucketCount.Enabled || !s.splunkClient.isConfigured(typeCm) { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedBucketCount.Enabled || !s.splunkClient.isConfigured(typeIdx) { return } - ctx = context.WithValue(ctx, endpointType("type"), typeCm) + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IndexesExtended var ept string @@ -1241,11 +1241,11 @@ func (s *splunkScraper) scrapeIndexesBucketCount(ctx context.Context, now pcommo // Scrape indexes extended raw size func (s *splunkScraper) scrapeIndexesRawSize(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedRawSize.Enabled || !s.splunkClient.isConfigured(typeCm) { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedRawSize.Enabled || !s.splunkClient.isConfigured(typeIdx) { return } - ctx = context.WithValue(ctx, endpointType("type"), typeCm) + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IndexesExtended var ept string @@ 
-1295,11 +1295,11 @@ func (s *splunkScraper) scrapeIndexesRawSize(ctx context.Context, now pcommon.Ti // Scrape indexes extended bucket event count func (s *splunkScraper) scrapeIndexesBucketEventCount(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedBucketEventCount.Enabled || !s.splunkClient.isConfigured(typeCm) { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedBucketEventCount.Enabled || !s.splunkClient.isConfigured(typeIdx) { return } - ctx = context.WithValue(ctx, endpointType("type"), typeCm) + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IndexesExtended var ept string @@ -1366,11 +1366,11 @@ func (s *splunkScraper) scrapeIndexesBucketEventCount(ctx context.Context, now p // Scrape indexes extended bucket hot/warm count func (s *splunkScraper) scrapeIndexesBucketHotWarmCount(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedBucketHotCount.Enabled || !s.splunkClient.isConfigured(typeCm) { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedBucketHotCount.Enabled || !s.splunkClient.isConfigured(typeIdx) { return } - ctx = context.WithValue(ctx, endpointType("type"), typeCm) + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IndexesExtended var ept string @@ -1430,11 +1430,11 @@ func (s *splunkScraper) scrapeIndexesBucketHotWarmCount(ctx context.Context, now // Scrape introspection queues func (s *splunkScraper) scrapeIntrospectionQueues(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - if !s.conf.MetricsBuilderConfig.Metrics.SplunkServerIntrospectionQueuesCurrent.Enabled || !s.splunkClient.isConfigured(typeCm) { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkServerIntrospectionQueuesCurrent.Enabled || !s.splunkClient.isConfigured(typeIdx) { return } - ctx = 
context.WithValue(ctx, endpointType("type"), typeCm) + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IntrospectionQueues var ept string @@ -1479,11 +1479,11 @@ func (s *splunkScraper) scrapeIntrospectionQueues(ctx context.Context, now pcomm // Scrape introspection queues bytes func (s *splunkScraper) scrapeIntrospectionQueuesBytes(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - if !s.conf.MetricsBuilderConfig.Metrics.SplunkServerIntrospectionQueuesCurrentBytes.Enabled || !s.splunkClient.isConfigured(typeCm) { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkServerIntrospectionQueuesCurrentBytes.Enabled || !s.splunkClient.isConfigured(typeIdx) { return } - ctx = context.WithValue(ctx, endpointType("type"), typeCm) + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IntrospectionQueues var ept string From 638751e2717d88f8e523c5041338adb91b7fc788 Mon Sep 17 00:00:00 2001 From: shalper2 Date: Tue, 13 Feb 2024 09:42:19 -0600 Subject: [PATCH 10/31] fixed tests --- receiver/splunkenterprisereceiver/client_test.go | 12 +++--------- receiver/splunkenterprisereceiver/config.go | 7 ------- .../generated_component_test.go | 1 + receiver/splunkenterprisereceiver/scraper_test.go | 5 ++--- 4 files changed, 6 insertions(+), 19 deletions(-) diff --git a/receiver/splunkenterprisereceiver/client_test.go b/receiver/splunkenterprisereceiver/client_test.go index 7a4a7579f7be7..1233750a42503 100644 --- a/receiver/splunkenterprisereceiver/client_test.go +++ b/receiver/splunkenterprisereceiver/client_test.go @@ -47,7 +47,7 @@ func TestClientCreation(t *testing.T) { host := &mockHost{ extensions: map[component.ID]component.Component{ - component.MustNewIDWithName("basicauth", "client"): auth.NewClient(), + component.NewID("basicauth/client"): auth.NewClient(), }, } // create a client from an example config @@ -65,13 +65,7 @@ func TestClientCreateRequest(t *testing.T) { cfg := &Config{ IdxEndpoint: 
confighttp.HTTPClientSettings{ Endpoint: "https://localhost:8089", -<<<<<<< HEAD - Auth: &configauth.Authentication{ - AuthenticatorID: component.MustNewIDWithName("basicauth", "client"), - }, -======= Auth: &configauth.Authentication{AuthenticatorID: component.NewID("basicauth/client")}, ->>>>>>> 49e204e471 (fixed tests) }, ScraperControllerSettings: scraperhelper.ScraperControllerSettings{ CollectionInterval: 10 * time.Second, @@ -82,7 +76,7 @@ func TestClientCreateRequest(t *testing.T) { host := &mockHost{ extensions: map[component.ID]component.Component{ - component.MustNewIDWithName("basicauth", "client"): auth.NewClient(), + component.NewID("basicauth/client"): auth.NewClient(), }, } // create a client from an example config @@ -163,7 +157,7 @@ func TestAPIRequestCreate(t *testing.T) { host := &mockHost{ extensions: map[component.ID]component.Component{ - component.MustNewIDWithName("basicauth", "client"): auth.NewClient(), + component.NewID("basicauth/client"): auth.NewClient(), }, } // create a client from an example config diff --git a/receiver/splunkenterprisereceiver/config.go b/receiver/splunkenterprisereceiver/config.go index 92d86d757c17e..75b660c8894c8 100644 --- a/receiver/splunkenterprisereceiver/config.go +++ b/receiver/splunkenterprisereceiver/config.go @@ -74,12 +74,5 @@ func (cfg *Config) Validate() (errors error) { } } -<<<<<<< HEAD - if cfg.ClientConfig.Auth.AuthenticatorID.Name() == "" { - errors = multierr.Append(errors, errMissingAuthExtension) - } - -======= ->>>>>>> 3a6a3c2ce2 (refactored client) return errors } diff --git a/receiver/splunkenterprisereceiver/generated_component_test.go b/receiver/splunkenterprisereceiver/generated_component_test.go index 0bd39f68d6d7c..a189c378e1960 100644 --- a/receiver/splunkenterprisereceiver/generated_component_test.go +++ b/receiver/splunkenterprisereceiver/generated_component_test.go @@ -6,6 +6,7 @@ import ( "context" "testing" + "github.com/stretchr/testify/assert" 
"github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenttest" diff --git a/receiver/splunkenterprisereceiver/scraper_test.go b/receiver/splunkenterprisereceiver/scraper_test.go index f6001724f4ad5..08086a6a3ee1e 100644 --- a/receiver/splunkenterprisereceiver/scraper_test.go +++ b/receiver/splunkenterprisereceiver/scraper_test.go @@ -8,7 +8,6 @@ import ( "net/http" "net/http/httptest" "path/filepath" - "strings" "testing" "time" @@ -51,7 +50,7 @@ func mockIntrospectionQueues(w http.ResponseWriter, _ *http.Request) { // mock server create func createMockServer() *httptest.Server { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch strings.TrimSpace(r.URL.Path) { + switch r.URL.String() { case "/services/server/introspection/indexer?output_mode=json": mockIndexerThroughput(w, r) case "/services/data/indexes-extended?output_mode=json&count=-1": @@ -106,7 +105,7 @@ func TestScraper(t *testing.T) { host := &mockHost{ extensions: map[component.ID]component.Component{ - component.MustNewIDWithName("basicauth", "client"): auth.NewClient(), + component.NewID("basicauth/client"): auth.NewClient(), }, } From 02e91adf52b2eed06c7b672fa8fa816346cdc7f3 Mon Sep 17 00:00:00 2001 From: shalper2 Date: Tue, 13 Feb 2024 09:51:24 -0600 Subject: [PATCH 11/31] modified client to avoid pointer party --- receiver/splunkenterprisereceiver/client.go | 27 +++++++++++-------- .../splunkenterprisereceiver/client_test.go | 4 +-- 2 files changed, 18 insertions(+), 13 deletions(-) diff --git a/receiver/splunkenterprisereceiver/client.go b/receiver/splunkenterprisereceiver/client.go index 171af752872ad..3b335762b8388 100644 --- a/receiver/splunkenterprisereceiver/client.go +++ b/receiver/splunkenterprisereceiver/client.go @@ -30,8 +30,13 @@ var ( // Type wrapper for accessing context value type endpointType string +// Wrapper around splunkClientMap to avoid awkward 
reference/dereference stuff that arises when using maps in golang +type splunkEntClient struct { + clients splunkClientMap +} + // The splunkEntClient is made up of a number of splunkClients defined for each configured endpoint -type splunkEntClient map[any]*splunkClient +type splunkClientMap map[any]splunkClient // The client does not carry the endpoint that is configured with it and golang does not support mixed // type arrays so this struct contains the pair: the client configured for the endpoint and the endpoint @@ -45,7 +50,7 @@ func newSplunkEntClient(cfg *Config, h component.Host, s component.TelemetrySett var err error var e *url.URL var c *http.Client - splunkEntClient := make(splunkEntClient) + clientMap := make(splunkClientMap) // if the endpoint is defined, put it in the endpoints map for later use // we already checked that url.Parse does not fail in cfg.Validate() @@ -55,7 +60,7 @@ func newSplunkEntClient(cfg *Config, h component.Host, s component.TelemetrySett if err != nil { return nil, err } - splunkEntClient[typeIdx] = &splunkClient{ + clientMap[typeIdx] = splunkClient{ client: c, endpoint: e, } @@ -66,7 +71,7 @@ func newSplunkEntClient(cfg *Config, h component.Host, s component.TelemetrySett if err != nil { return nil, err } - splunkEntClient[typeSh] = &splunkClient{ + clientMap[typeSh] = splunkClient{ client: c, endpoint: e, } @@ -77,13 +82,13 @@ func newSplunkEntClient(cfg *Config, h component.Host, s component.TelemetrySett if err != nil { return nil, err } - splunkEntClient[typeCm] = &splunkClient{ + clientMap[typeCm] = splunkClient{ client: c, endpoint: e, } } - return &splunkEntClient, nil + return &splunkEntClient{clients: clientMap}, nil } // For running ad hoc searches only @@ -99,7 +104,7 @@ func (c *splunkEntClient) createRequest(ctx context.Context, sr *searchResponse) if sr.Jobid == nil { var u string path := "/services/search/jobs/" - if e, ok := (*c)[eptType]; !ok { + if e, ok := c.clients[eptType]; !ok { return nil, 
errNoClientFound } else { u, err = url.JoinPath(e.endpoint.String(), path) @@ -120,7 +125,7 @@ func (c *splunkEntClient) createRequest(ctx context.Context, sr *searchResponse) return req, nil } path := fmt.Sprintf("/services/search/jobs/%s/results", *sr.Jobid) - url, _ := url.JoinPath((*c)[eptType].endpoint.String(), path) + url, _ := url.JoinPath(c.clients[eptType].endpoint.String(), path) req, err = http.NewRequestWithContext(ctx, http.MethodGet, url, nil) if err != nil { @@ -140,7 +145,7 @@ func (c *splunkEntClient) createAPIRequest(ctx context.Context, apiEndpoint stri return nil, errCtxMissingEndpointType } - if e, ok := (*c)[eptType]; !ok { + if e, ok := c.clients[eptType]; !ok { return nil, errNoClientFound } else { u = e.endpoint.String() + apiEndpoint @@ -160,7 +165,7 @@ func (c *splunkEntClient) makeRequest(req *http.Request) (*http.Response, error) if eptType == nil { return nil, errCtxMissingEndpointType } - if sc, ok := (*c)[eptType]; ok { + if sc, ok := c.clients[eptType]; ok { res, err := sc.client.Do(req) if err != nil { return nil, err @@ -174,6 +179,6 @@ func (c *splunkEntClient) makeRequest(req *http.Request) (*http.Response, error) // Check if the splunkEntClient contains a configured endpoint for the type of scraper // Returns true if an entry exists, false if not. 
func (c *splunkEntClient) isConfigured(v string) bool { - _, ok := (*c)[v] + _, ok := c.clients[v] return ok } diff --git a/receiver/splunkenterprisereceiver/client_test.go b/receiver/splunkenterprisereceiver/client_test.go index 1233750a42503..3fc1f08d6532a 100644 --- a/receiver/splunkenterprisereceiver/client_test.go +++ b/receiver/splunkenterprisereceiver/client_test.go @@ -56,7 +56,7 @@ func TestClientCreation(t *testing.T) { testEndpoint, _ := url.Parse("https://localhost:8089") - require.Equal(t, testEndpoint, (*client)[typeIdx].endpoint) + require.Equal(t, testEndpoint, client.clients[typeIdx].endpoint) } // test functionality of createRequest which is used for building metrics out of @@ -171,7 +171,7 @@ func TestAPIRequestCreate(t *testing.T) { require.NoError(t, err) // build the expected request - expectedURL := (*client)[typeIdx].endpoint.String() + "/test/endpoint" + expectedURL := client.clients[typeIdx].endpoint.String() + "/test/endpoint" expected, _ := http.NewRequest(http.MethodGet, expectedURL, nil) require.Equal(t, expected.URL, req.URL) From 79a205d031c8d116984e0225b06221fb43f7d61b Mon Sep 17 00:00:00 2001 From: shalper2 Date: Wed, 14 Feb 2024 14:43:23 -0600 Subject: [PATCH 12/31] make generate --- receiver/splunkenterprisereceiver/config.go | 3 +-- receiver/splunkenterprisereceiver/config_test.go | 5 ++--- receiver/splunkenterprisereceiver/factory.go | 3 +-- receiver/splunkenterprisereceiver/factory_test.go | 3 +-- .../internal/metadata/generated_config.go | 2 +- receiver/splunkenterprisereceiver/scraper.go | 3 +-- receiver/splunkenterprisereceiver/scraper_test.go | 7 +++---- 7 files changed, 10 insertions(+), 16 deletions(-) diff --git a/receiver/splunkenterprisereceiver/config.go b/receiver/splunkenterprisereceiver/config.go index 75b660c8894c8..440c50e2140bc 100644 --- a/receiver/splunkenterprisereceiver/config.go +++ b/receiver/splunkenterprisereceiver/config.go @@ -8,11 +8,10 @@ import ( "net/url" "strings" + 
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/receiver/scraperhelper" "go.uber.org/multierr" - - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" ) var ( diff --git a/receiver/splunkenterprisereceiver/config_test.go b/receiver/splunkenterprisereceiver/config_test.go index a351e91f730e3..4903f06985f6e 100644 --- a/receiver/splunkenterprisereceiver/config_test.go +++ b/receiver/splunkenterprisereceiver/config_test.go @@ -7,14 +7,13 @@ import ( "path/filepath" "testing" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configauth" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/confmap/confmaptest" "go.uber.org/multierr" - - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" ) func TestLoadConfig(t *testing.T) { @@ -33,7 +32,7 @@ func TestEndpointCorrectness(t *testing.T) { // Error for bad or missing endpoint errBad = multierr.Append(errBad, errBadOrMissingEndpoint) // There is no way with the current SDK design to create a test config that - // satisfies the auth extention so we will just expect this error to appear. + // satisfies the auth extension so we will just expect this error to appear. errBad = multierr.Append(errBad, errMissingAuthExtension) // Errors related to setting the wrong endpoint field (i.e. 
the one from httpconfig) diff --git a/receiver/splunkenterprisereceiver/factory.go b/receiver/splunkenterprisereceiver/factory.go index d0b9343c63a14..37cada962c753 100644 --- a/receiver/splunkenterprisereceiver/factory.go +++ b/receiver/splunkenterprisereceiver/factory.go @@ -7,14 +7,13 @@ import ( "context" "time" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/config/configopaque" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/scraperhelper" - - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" ) const ( diff --git a/receiver/splunkenterprisereceiver/factory_test.go b/receiver/splunkenterprisereceiver/factory_test.go index cb8617917b3c7..f307aa45b392f 100644 --- a/receiver/splunkenterprisereceiver/factory_test.go +++ b/receiver/splunkenterprisereceiver/factory_test.go @@ -8,14 +8,13 @@ import ( "testing" "time" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/config/configopaque" "go.opentelemetry.io/collector/consumer/consumertest" "go.opentelemetry.io/collector/receiver/receivertest" "go.opentelemetry.io/collector/receiver/scraperhelper" - - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" ) func TestFactoryCreate(t *testing.T) { diff --git a/receiver/splunkenterprisereceiver/internal/metadata/generated_config.go b/receiver/splunkenterprisereceiver/internal/metadata/generated_config.go index 7e9248ece59b9..5b80b4fb6dcc1 100644 --- 
a/receiver/splunkenterprisereceiver/internal/metadata/generated_config.go +++ b/receiver/splunkenterprisereceiver/internal/metadata/generated_config.go @@ -15,7 +15,7 @@ func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error { if parser == nil { return nil } - err := parser.Unmarshal(ms, confmap.WithErrorUnused()) + err := parser.Unmarshal(ms) if err != nil { return err } diff --git a/receiver/splunkenterprisereceiver/scraper.go b/receiver/splunkenterprisereceiver/scraper.go index 4d7d97292fb6d..36c5bd0c541ee 100644 --- a/receiver/splunkenterprisereceiver/scraper.go +++ b/receiver/splunkenterprisereceiver/scraper.go @@ -14,13 +14,12 @@ import ( "strconv" "time" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/scrapererror" - - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" ) var ( diff --git a/receiver/splunkenterprisereceiver/scraper_test.go b/receiver/splunkenterprisereceiver/scraper_test.go index 08086a6a3ee1e..db3eabd4c59b6 100644 --- a/receiver/splunkenterprisereceiver/scraper_test.go +++ b/receiver/splunkenterprisereceiver/scraper_test.go @@ -11,6 +11,9 @@ import ( "testing" "time" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenttest" @@ -19,10 +22,6 @@ import ( "go.opentelemetry.io/collector/extension/auth" "go.opentelemetry.io/collector/receiver/receivertest" 
"go.opentelemetry.io/collector/receiver/scraperhelper" - - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest" - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" ) // handler function for mock server From ca050b3c4d5009dd2d73f6289ca97dc5d50eaccb Mon Sep 17 00:00:00 2001 From: shalper2 Date: Wed, 14 Feb 2024 15:17:47 -0600 Subject: [PATCH 13/31] linted --- receiver/splunkenterprisereceiver/client.go | 19 ++++++----- receiver/splunkenterprisereceiver/config.go | 4 +-- .../splunkenterprisereceiver/config_test.go | 9 ++--- receiver/splunkenterprisereceiver/factory.go | 3 +- .../splunkenterprisereceiver/factory_test.go | 3 +- receiver/splunkenterprisereceiver/scraper.go | 34 +++++++------------ .../splunkenterprisereceiver/scraper_test.go | 7 ++-- 7 files changed, 35 insertions(+), 44 deletions(-) diff --git a/receiver/splunkenterprisereceiver/client.go b/receiver/splunkenterprisereceiver/client.go index 3b335762b8388..6c0bb419f7663 100644 --- a/receiver/splunkenterprisereceiver/client.go +++ b/receiver/splunkenterprisereceiver/client.go @@ -104,20 +104,21 @@ func (c *splunkEntClient) createRequest(ctx context.Context, sr *searchResponse) if sr.Jobid == nil { var u string path := "/services/search/jobs/" - if e, ok := c.clients[eptType]; !ok { - return nil, errNoClientFound - } else { + + if e, ok := c.clients[eptType]; ok { u, err = url.JoinPath(e.endpoint.String(), path) if err != nil { return nil, err } + } else { + return nil, errNoClientFound } // reader for the response data data := strings.NewReader(sr.search) // return the build request, ready to be run by makeRequest - req, err := http.NewRequestWithContext(ctx, http.MethodPost, u, data) + req, err = http.NewRequestWithContext(ctx, http.MethodPost, u, data) if err != nil { return nil, err } @@ -145,11 +146,12 @@ func (c *splunkEntClient) 
createAPIRequest(ctx context.Context, apiEndpoint stri return nil, errCtxMissingEndpointType } - if e, ok := c.clients[eptType]; !ok { - return nil, errNoClientFound - } else { + if e, ok := c.clients[eptType]; ok { u = e.endpoint.String() + apiEndpoint + } else { + return nil, errNoClientFound } + req, err = http.NewRequestWithContext(ctx, http.MethodGet, u, nil) if err != nil { return nil, err @@ -171,9 +173,8 @@ func (c *splunkEntClient) makeRequest(req *http.Request) (*http.Response, error) return nil, err } return res, nil - } else { - return nil, errEndpointTypeNotFound } + return nil, errEndpointTypeNotFound } // Check if the splunkEntClient contains a configured endpoint for the type of scraper diff --git a/receiver/splunkenterprisereceiver/config.go b/receiver/splunkenterprisereceiver/config.go index 440c50e2140bc..4f430c1be8d6d 100644 --- a/receiver/splunkenterprisereceiver/config.go +++ b/receiver/splunkenterprisereceiver/config.go @@ -8,14 +8,14 @@ import ( "net/url" "strings" - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/receiver/scraperhelper" "go.uber.org/multierr" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" ) var ( - errUnspecifiedEndpoint = errors.New("endpoint to an unspecified node type") errBadOrMissingEndpoint = errors.New("missing a valid endpoint") errBadScheme = errors.New("endpoint scheme must be either http or https") errMissingAuthExtension = errors.New("auth extension missing from config") diff --git a/receiver/splunkenterprisereceiver/config_test.go b/receiver/splunkenterprisereceiver/config_test.go index 4903f06985f6e..31c22579a24ce 100644 --- a/receiver/splunkenterprisereceiver/config_test.go +++ b/receiver/splunkenterprisereceiver/config_test.go @@ -7,13 +7,14 @@ import ( "path/filepath" "testing" - 
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configauth" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/confmap/confmaptest" "go.uber.org/multierr" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" ) func TestLoadConfig(t *testing.T) { @@ -28,17 +29,13 @@ func TestLoadConfig(t *testing.T) { func TestEndpointCorrectness(t *testing.T) { // Declare errors for tests that should fail - var errBad, errMisconf, errScheme error + var errBad, errScheme error // Error for bad or missing endpoint errBad = multierr.Append(errBad, errBadOrMissingEndpoint) // There is no way with the current SDK design to create a test config that // satisfies the auth extension so we will just expect this error to appear. errBad = multierr.Append(errBad, errMissingAuthExtension) - // Errors related to setting the wrong endpoint field (i.e. 
the one from httpconfig) - errMisconf = multierr.Append(errMisconf, errMissingAuthExtension) - errMisconf = multierr.Append(errMisconf, errUnspecifiedEndpoint) - // Error related to bad scheme (not http/s) errScheme = multierr.Append(errScheme, errBadScheme) errScheme = multierr.Append(errScheme, errMissingAuthExtension) diff --git a/receiver/splunkenterprisereceiver/factory.go b/receiver/splunkenterprisereceiver/factory.go index 37cada962c753..d0b9343c63a14 100644 --- a/receiver/splunkenterprisereceiver/factory.go +++ b/receiver/splunkenterprisereceiver/factory.go @@ -7,13 +7,14 @@ import ( "context" "time" - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/config/configopaque" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/scraperhelper" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" ) const ( diff --git a/receiver/splunkenterprisereceiver/factory_test.go b/receiver/splunkenterprisereceiver/factory_test.go index f307aa45b392f..cb8617917b3c7 100644 --- a/receiver/splunkenterprisereceiver/factory_test.go +++ b/receiver/splunkenterprisereceiver/factory_test.go @@ -8,13 +8,14 @@ import ( "testing" "time" - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/config/configopaque" "go.opentelemetry.io/collector/consumer/consumertest" "go.opentelemetry.io/collector/receiver/receivertest" "go.opentelemetry.io/collector/receiver/scraperhelper" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" ) func 
TestFactoryCreate(t *testing.T) { diff --git a/receiver/splunkenterprisereceiver/scraper.go b/receiver/splunkenterprisereceiver/scraper.go index 36c5bd0c541ee..f73d9f4699d37 100644 --- a/receiver/splunkenterprisereceiver/scraper.go +++ b/receiver/splunkenterprisereceiver/scraper.go @@ -14,12 +14,13 @@ import ( "strconv" "time" - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/scrapererror" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" ) var ( @@ -85,11 +86,10 @@ func (s *splunkScraper) scrapeLicenseUsageByIndex(ctx context.Context, now pcomm return } ctx = context.WithValue(ctx, endpointType("type"), typeCm) - var sr searchResponse // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding - sr = searchResponse{ + sr := searchResponse{ search: searchDict[`SplunkLicenseIndexUsageSearch`], } @@ -1047,9 +1047,8 @@ func (s *splunkScraper) scrapeIndexThroughput(ctx context.Context, now pcommon.T ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it indexThroughput - var ept string - ept = apiDict[`SplunkIndexerThroughput`] + ept := apiDict[`SplunkIndexerThroughput`] req, err := s.splunkClient.createAPIRequest(ctx, ept) if err != nil { @@ -1089,9 +1088,7 @@ func (s *splunkScraper) scrapeIndexesTotalSize(ctx context.Context, now pcommon. 
ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IndexesExtended - var ept string - - ept = apiDict[`SplunkDataIndexesExtended`] + ept := apiDict[`SplunkDataIndexesExtended`] req, err := s.splunkClient.createAPIRequest(ctx, ept) if err != nil { @@ -1144,9 +1141,8 @@ func (s *splunkScraper) scrapeIndexesEventCount(ctx context.Context, now pcommon ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IndexesExtended - var ept string - ept = apiDict[`SplunkDataIndexesExtended`] + ept := apiDict[`SplunkDataIndexesExtended`] req, err := s.splunkClient.createAPIRequest(ctx, ept) if err != nil { @@ -1192,9 +1188,8 @@ func (s *splunkScraper) scrapeIndexesBucketCount(ctx context.Context, now pcommo ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IndexesExtended - var ept string - ept = apiDict[`SplunkDataIndexesExtended`] + ept := apiDict[`SplunkDataIndexesExtended`] req, err := s.splunkClient.createAPIRequest(ctx, ept) if err != nil { @@ -1246,9 +1241,8 @@ func (s *splunkScraper) scrapeIndexesRawSize(ctx context.Context, now pcommon.Ti ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IndexesExtended - var ept string - ept = apiDict[`SplunkDataIndexesExtended`] + ept := apiDict[`SplunkDataIndexesExtended`] req, err := s.splunkClient.createAPIRequest(ctx, ept) if err != nil { @@ -1300,9 +1294,8 @@ func (s *splunkScraper) scrapeIndexesBucketEventCount(ctx context.Context, now p ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IndexesExtended - var ept string - ept = apiDict[`SplunkDataIndexesExtended`] + ept := apiDict[`SplunkDataIndexesExtended`] req, err := s.splunkClient.createAPIRequest(ctx, ept) if err != nil { @@ -1371,9 +1364,8 @@ func (s *splunkScraper) scrapeIndexesBucketHotWarmCount(ctx context.Context, now ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IndexesExtended - var ept string - ept = apiDict[`SplunkDataIndexesExtended`] + ept := 
apiDict[`SplunkDataIndexesExtended`] req, err := s.splunkClient.createAPIRequest(ctx, ept) if err != nil { @@ -1435,9 +1427,8 @@ func (s *splunkScraper) scrapeIntrospectionQueues(ctx context.Context, now pcomm ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IntrospectionQueues - var ept string - ept = apiDict[`SplunkIntrospectionQueues`] + ept := apiDict[`SplunkIntrospectionQueues`] req, err := s.splunkClient.createAPIRequest(ctx, ept) if err != nil { @@ -1484,9 +1475,8 @@ func (s *splunkScraper) scrapeIntrospectionQueuesBytes(ctx context.Context, now ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IntrospectionQueues - var ept string - ept = apiDict[`SplunkIntrospectionQueues`] + ept := apiDict[`SplunkIntrospectionQueues`] req, err := s.splunkClient.createAPIRequest(ctx, ept) if err != nil { diff --git a/receiver/splunkenterprisereceiver/scraper_test.go b/receiver/splunkenterprisereceiver/scraper_test.go index db3eabd4c59b6..08086a6a3ee1e 100644 --- a/receiver/splunkenterprisereceiver/scraper_test.go +++ b/receiver/splunkenterprisereceiver/scraper_test.go @@ -11,9 +11,6 @@ import ( "testing" "time" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest" - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenttest" @@ -22,6 +19,10 @@ import ( "go.opentelemetry.io/collector/extension/auth" "go.opentelemetry.io/collector/receiver/receivertest" "go.opentelemetry.io/collector/receiver/scraperhelper" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest" + 
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" ) // handler function for mock server From 587eb838866deebcb468d8b70d9a79b580de6f79 Mon Sep 17 00:00:00 2001 From: shalper2 Date: Tue, 20 Feb 2024 09:29:08 -0600 Subject: [PATCH 14/31] updated README --- receiver/splunkenterprisereceiver/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/receiver/splunkenterprisereceiver/README.md b/receiver/splunkenterprisereceiver/README.md index ad36fde47cea0..835b03ed0e692 100644 --- a/receiver/splunkenterprisereceiver/README.md +++ b/receiver/splunkenterprisereceiver/README.md @@ -8,7 +8,7 @@ jobs. ## Configuration -The following settings are required, omitting them will either cause your receiver to fail to compile or result in 4/5xx return codes during scraping. These must be set for each Splunk instance type (indexer, search head, or cluster master) from which you wish to pull metrics. At present, only one of each type is accepted, per configured receiver instance. +The following settings are required, omitting them will either cause your receiver to fail to compile or result in 4/5xx return codes during scraping. These must be set for each Splunk instance type (indexer, search head, or cluster master) from which you wish to pull metrics. At present, only one of each type is accepted, per configured receiver instance. This means, for example, that if you have three different "indexer" type instances that you would like to pull metrics from you will need to configure three different `splunkenterprise` receivers for each indexer node you wish to monitor. * `basicauth` (from [basicauthextension](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/extension/basicauthextension)): A configured stanza for the basicauthextension. * `auth` (no default): String name referencing your auth extension. 
From 04f6438f3bfa3be3afaa21276657ef6d19f9c5f2 Mon Sep 17 00:00:00 2001 From: shalper2 Date: Tue, 20 Feb 2024 12:20:13 -0600 Subject: [PATCH 15/31] added ctx to scrapes --- receiver/splunkenterprisereceiver/scraper.go | 44 ++++++++++---------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/receiver/splunkenterprisereceiver/scraper.go b/receiver/splunkenterprisereceiver/scraper.go index f73d9f4699d37..f132ed8c7b9e8 100644 --- a/receiver/splunkenterprisereceiver/scraper.go +++ b/receiver/splunkenterprisereceiver/scraper.go @@ -82,12 +82,12 @@ func (s *splunkScraper) scrape(ctx context.Context) (pmetric.Metrics, error) { // Each metric has its own scrape function associated with it func (s *splunkScraper) scrapeLicenseUsageByIndex(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { + // Because we have to utilize network resources for each KPI we should check that each metrics + // is enabled before proceeding if !s.conf.MetricsBuilderConfig.Metrics.SplunkLicenseIndexUsage.Enabled || !s.splunkClient.isConfigured(typeCm) { return } ctx = context.WithValue(ctx, endpointType("type"), typeCm) - // Because we have to utilize network resources for each KPI we should check that each metrics - // is enabled before proceeding sr := searchResponse{ search: searchDict[`SplunkLicenseIndexUsageSearch`], @@ -156,16 +156,16 @@ func (s *splunkScraper) scrapeLicenseUsageByIndex(ctx context.Context, now pcomm } func (s *splunkScraper) scrapeAvgExecLatencyByHost(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - var sr searchResponse // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding if !s.conf.MetricsBuilderConfig.Metrics.SplunkSchedulerAvgExecutionLatency.Enabled { return } - sr = searchResponse{ + sr := searchResponse{ search: searchDict[`SplunkSchedulerAvgExecLatencySearch`], } + ctx = context.WithValue(ctx, endpointType("type"), typeCm) 
var ( req *http.Request @@ -234,16 +234,16 @@ func (s *splunkScraper) scrapeAvgExecLatencyByHost(ctx context.Context, now pcom } func (s *splunkScraper) scrapeIndexerAvgRate(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - var sr searchResponse // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding if !s.conf.MetricsBuilderConfig.Metrics.SplunkIndexerAvgRate.Enabled { return } - sr = searchResponse{ + sr := searchResponse{ search: searchDict[`SplunkIndexerAvgRate`], } + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var ( req *http.Request @@ -315,16 +315,16 @@ func (s *splunkScraper) scrapeIndexerAvgRate(ctx context.Context, now pcommon.Ti } func (s *splunkScraper) scrapeIndexerPipelineQueues(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - var sr searchResponse // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding if !s.conf.MetricsBuilderConfig.Metrics.SplunkAggregationQueueRatio.Enabled { return } - sr = searchResponse{ + sr := searchResponse{ search: searchDict[`SplunkPipelineQueues`], } + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var ( req *http.Request @@ -428,16 +428,16 @@ func (s *splunkScraper) scrapeIndexerPipelineQueues(ctx context.Context, now pco } func (s *splunkScraper) scrapeBucketsSearchableStatus(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - var sr searchResponse // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding if !s.conf.MetricsBuilderConfig.Metrics.SplunkBucketsSearchableStatus.Enabled { return } - sr = searchResponse{ + sr := searchResponse{ search: searchDict[`SplunkBucketsSearchableStatus`], } + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var ( req *http.Request @@ -516,16 +516,16 
@@ func (s *splunkScraper) scrapeBucketsSearchableStatus(ctx context.Context, now p } func (s *splunkScraper) scrapeIndexesBucketCountAdHoc(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - var sr searchResponse // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding if !s.conf.MetricsBuilderConfig.Metrics.SplunkIndexesSize.Enabled { return } - sr = searchResponse{ + sr := searchResponse{ search: searchDict[`SplunkIndexesData`], } + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var ( req *http.Request @@ -629,16 +629,16 @@ func (s *splunkScraper) scrapeIndexesBucketCountAdHoc(ctx context.Context, now p } func (s *splunkScraper) scrapeSchedulerCompletionRatioByHost(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - var sr searchResponse // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding if !s.conf.MetricsBuilderConfig.Metrics.SplunkSchedulerCompletionRatio.Enabled { return } - sr = searchResponse{ + sr := searchResponse{ search: searchDict[`SplunkSchedulerCompletionRatio`], } + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var ( req *http.Request @@ -707,16 +707,16 @@ func (s *splunkScraper) scrapeSchedulerCompletionRatioByHost(ctx context.Context } func (s *splunkScraper) scrapeIndexerRawWriteSecondsByHost(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - var sr searchResponse // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding if !s.conf.MetricsBuilderConfig.Metrics.SplunkIndexerRawWriteTime.Enabled { return } - sr = searchResponse{ + sr := searchResponse{ search: searchDict[`SplunkIndexerRawWriteSeconds`], } + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var ( req *http.Request @@ -785,16 +785,16 @@ func (s 
*splunkScraper) scrapeIndexerRawWriteSecondsByHost(ctx context.Context, } func (s *splunkScraper) scrapeIndexerCPUSecondsByHost(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - var sr searchResponse // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding if !s.conf.MetricsBuilderConfig.Metrics.SplunkIndexerCPUTime.Enabled { return } - sr = searchResponse{ + sr := searchResponse{ search: searchDict[`SplunkIndexerCpuSeconds`], } + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var ( req *http.Request @@ -863,16 +863,16 @@ func (s *splunkScraper) scrapeIndexerCPUSecondsByHost(ctx context.Context, now p } func (s *splunkScraper) scrapeAvgIopsByHost(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - var sr searchResponse // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding if !s.conf.MetricsBuilderConfig.Metrics.SplunkIoAvgIops.Enabled { return } - sr = searchResponse{ + sr := searchResponse{ search: searchDict[`SplunkIoAvgIops`], } + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var ( req *http.Request @@ -941,16 +941,16 @@ func (s *splunkScraper) scrapeAvgIopsByHost(ctx context.Context, now pcommon.Tim } func (s *splunkScraper) scrapeSchedulerRunTimeByHost(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - var sr searchResponse // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding if !s.conf.MetricsBuilderConfig.Metrics.SplunkSchedulerAvgRunTime.Enabled { return } - sr = searchResponse{ + sr := searchResponse{ search: searchDict[`SplunkSchedulerAvgRunTime`], } + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var ( req *http.Request From 2701222f5358bceaeb728f345a1f4cf44320f3d4 Mon Sep 17 00:00:00 2001 From: shalper2 Date: Thu, 
8 Feb 2024 15:41:16 -0600 Subject: [PATCH 16/31] refactored scraper --- receiver/splunkenterprisereceiver/scraper.go | 205 +++++++++++------- .../splunkenterprisereceiver/scraper_test.go | 4 +- 2 files changed, 124 insertions(+), 85 deletions(-) diff --git a/receiver/splunkenterprisereceiver/scraper.go b/receiver/splunkenterprisereceiver/scraper.go index f132ed8c7b9e8..a49daf981f13e 100644 --- a/receiver/splunkenterprisereceiver/scraper.go +++ b/receiver/splunkenterprisereceiver/scraper.go @@ -12,6 +12,7 @@ import ( "io" "net/http" "strconv" + "sync" "time" "go.opentelemetry.io/collector/component" @@ -52,38 +53,76 @@ func (s *splunkScraper) start(_ context.Context, h component.Host) (err error) { return nil } +// listens to the error channel and combines errors sent from different metric scrape functions, +// returning the concatinated error list should context timeout or a nil error value is sent in the +// channel signifying the end of a scrape cycle +func errorListener(ctx context.Context, eQueue <-chan error, eOut chan<- *scrapererror.ScrapeErrors) { + errs := &scrapererror.ScrapeErrors{} + + for { + select { + // context timeout + case <-ctx.Done(): + eOut <- errs + return + case err, ok := <-eQueue: + // shutdown + if err == nil || !ok { + eOut <- errs + return + // or add an error to errs + } else { + errs.Add(err) + } + } + } +} + // The big one: Describes how all scraping tasks should be performed. 
Part of the scraper interface func (s *splunkScraper) scrape(ctx context.Context) (pmetric.Metrics, error) { - errs := &scrapererror.ScrapeErrors{} + var wg sync.WaitGroup + errChan := make(chan error, 9) + errOut := make(chan *scrapererror.ScrapeErrors) + var errs *scrapererror.ScrapeErrors now := pcommon.NewTimestampFromTime(time.Now()) - - s.scrapeLicenseUsageByIndex(ctx, now, errs) - s.scrapeAvgExecLatencyByHost(ctx, now, errs) - s.scrapeSchedulerCompletionRatioByHost(ctx, now, errs) - s.scrapeIndexerAvgRate(ctx, now, errs) - s.scrapeSchedulerRunTimeByHost(ctx, now, errs) - s.scrapeIndexerRawWriteSecondsByHost(ctx, now, errs) - s.scrapeIndexerCPUSecondsByHost(ctx, now, errs) - s.scrapeAvgIopsByHost(ctx, now, errs) - s.scrapeIndexThroughput(ctx, now, errs) - s.scrapeIndexesTotalSize(ctx, now, errs) - s.scrapeIndexesEventCount(ctx, now, errs) - s.scrapeIndexesBucketCount(ctx, now, errs) - s.scrapeIndexesRawSize(ctx, now, errs) - s.scrapeIndexesBucketEventCount(ctx, now, errs) - s.scrapeIndexesBucketHotWarmCount(ctx, now, errs) - s.scrapeIntrospectionQueues(ctx, now, errs) - s.scrapeIntrospectionQueuesBytes(ctx, now, errs) - s.scrapeIndexerPipelineQueues(ctx, now, errs) - s.scrapeBucketsSearchableStatus(ctx, now, errs) - s.scrapeIndexesBucketCountAdHoc(ctx, now, errs) + metricScrapes := []func(context.Context, pcommon.Timestamp, chan error){ + s.scrapeLicenseUsageByIndex, + s.scrapeIndexThroughput, + s.scrapeIndexesTotalSize, + s.scrapeIndexesEventCount, + s.scrapeIndexesBucketCount, + s.scrapeIndexesRawSize, + s.scrapeIndexesBucketEventCount, + s.scrapeIndexesBucketHotWarmCount, + s.scrapeIntrospectionQueues, + s.scrapeIntrospectionQueuesBytes, + } + + go func() { + errorListener(ctx, errChan, errOut) + }() + + for _, fn := range metricScrapes { + wg.Add(1) + go func( + fn func(ctx context.Context, now pcommon.Timestamp, errs chan error), + ctx context.Context, + now pcommon.Timestamp, + errs chan error) { + // actual function body + defer wg.Done() + fn(ctx, 
now, errs) + }(fn, ctx, now, errChan) + } + + wg.Wait() + errChan <- nil + errs = <-errOut return s.mb.Emit(), errs.Combine() } // Each metric has its own scrape function associated with it -func (s *splunkScraper) scrapeLicenseUsageByIndex(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - // Because we have to utilize network resources for each KPI we should check that each metrics - // is enabled before proceeding +func (s *splunkScraper) scrapeLicenseUsageByIndex(ctx context.Context, now pcommon.Timestamp, errs chan error) { if !s.conf.MetricsBuilderConfig.Metrics.SplunkLicenseIndexUsage.Enabled || !s.splunkClient.isConfigured(typeCm) { return } @@ -104,20 +143,20 @@ func (s *splunkScraper) scrapeLicenseUsageByIndex(ctx context.Context, now pcomm for { req, err = s.splunkClient.createRequest(ctx, &sr) if err != nil { - errs.Add(err) + errs <- err return } res, err = s.splunkClient.makeRequest(req) if err != nil { - errs.Add(err) + errs <- err return } // if its a 204 the body will be empty because we are still waiting on search results err = unmarshallSearchReq(res, &sr) if err != nil { - errs.Add(err) + errs <- err } res.Body.Close() @@ -132,7 +171,7 @@ func (s *splunkScraper) scrapeLicenseUsageByIndex(ctx context.Context, now pcomm } if time.Since(start) > s.conf.ScraperControllerSettings.Timeout { - errs.Add(errMaxSearchWaitTimeExceeded) + errs <- errMaxSearchWaitTimeExceeded return } } @@ -147,7 +186,7 @@ func (s *splunkScraper) scrapeLicenseUsageByIndex(ctx context.Context, now pcomm case "By": v, err := strconv.ParseFloat(f.Value, 64) if err != nil { - errs.Add(err) + errs <- err continue } s.mb.RecordSplunkLicenseIndexUsageDataPoint(now, int64(v), indexName) @@ -1040,7 +1079,7 @@ func unmarshallSearchReq(res *http.Response, sr *searchResponse) error { } // Scrape index throughput introspection endpoint -func (s *splunkScraper) scrapeIndexThroughput(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) 
{ +func (s *splunkScraper) scrapeIndexThroughput(ctx context.Context, now pcommon.Timestamp, errs chan error) { if !s.conf.MetricsBuilderConfig.Metrics.SplunkIndexerThroughput.Enabled || !s.splunkClient.isConfigured(typeIdx) { return } @@ -1052,26 +1091,26 @@ func (s *splunkScraper) scrapeIndexThroughput(ctx context.Context, now pcommon.T req, err := s.splunkClient.createAPIRequest(ctx, ept) if err != nil { - errs.Add(err) + errs <- err return } res, err := s.splunkClient.makeRequest(req) if err != nil { - errs.Add(err) + errs <- err return } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { - errs.Add(err) + errs <- err return } err = json.Unmarshal(body, &it) if err != nil { - errs.Add(err) + errs <- err return } @@ -1081,7 +1120,7 @@ func (s *splunkScraper) scrapeIndexThroughput(ctx context.Context, now pcommon.T } // Scrape indexes extended total size -func (s *splunkScraper) scrapeIndexesTotalSize(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { +func (s *splunkScraper) scrapeIndexesTotalSize(ctx context.Context, now pcommon.Timestamp, errs chan error) { if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedTotalSize.Enabled || !s.splunkClient.isConfigured(typeIdx) { return } @@ -1092,26 +1131,26 @@ func (s *splunkScraper) scrapeIndexesTotalSize(ctx context.Context, now pcommon. req, err := s.splunkClient.createAPIRequest(ctx, ept) if err != nil { - errs.Add(err) + errs <- err return } res, err := s.splunkClient.makeRequest(req) if err != nil { - errs.Add(err) + errs <- err return } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { - errs.Add(err) + errs <- err return } err = json.Unmarshal(body, &it) if err != nil { - errs.Add(err) + errs <- err return } @@ -1125,7 +1164,7 @@ func (s *splunkScraper) scrapeIndexesTotalSize(ctx context.Context, now pcommon. 
mb, err := strconv.ParseFloat(f.Content.TotalSize, 64) totalSize = int64(mb * 1024 * 1024) if err != nil { - errs.Add(err) + errs <- err } } @@ -1134,7 +1173,7 @@ func (s *splunkScraper) scrapeIndexesTotalSize(ctx context.Context, now pcommon. } // Scrape indexes extended total event count -func (s *splunkScraper) scrapeIndexesEventCount(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { +func (s *splunkScraper) scrapeIndexesEventCount(ctx context.Context, now pcommon.Timestamp, errs chan error) { if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedEventCount.Enabled || !s.splunkClient.isConfigured(typeIdx) { return } @@ -1146,26 +1185,26 @@ func (s *splunkScraper) scrapeIndexesEventCount(ctx context.Context, now pcommon req, err := s.splunkClient.createAPIRequest(ctx, ept) if err != nil { - errs.Add(err) + errs <- err return } res, err := s.splunkClient.makeRequest(req) if err != nil { - errs.Add(err) + errs <- err return } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { - errs.Add(err) + errs <- err return } err = json.Unmarshal(body, &it) if err != nil { - errs.Add(err) + errs <- err return } @@ -1181,7 +1220,7 @@ func (s *splunkScraper) scrapeIndexesEventCount(ctx context.Context, now pcommon } // Scrape indexes extended total bucket count -func (s *splunkScraper) scrapeIndexesBucketCount(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { +func (s *splunkScraper) scrapeIndexesBucketCount(ctx context.Context, now pcommon.Timestamp, errs chan error) { if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedBucketCount.Enabled || !s.splunkClient.isConfigured(typeIdx) { return } @@ -1193,26 +1232,26 @@ func (s *splunkScraper) scrapeIndexesBucketCount(ctx context.Context, now pcommo req, err := s.splunkClient.createAPIRequest(ctx, ept) if err != nil { - errs.Add(err) + errs <- err return } res, err := s.splunkClient.makeRequest(req) if err != nil { - errs.Add(err) 
+ errs <- err return } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { - errs.Add(err) + errs <- err return } err = json.Unmarshal(body, &it) if err != nil { - errs.Add(err) + errs <- err return } @@ -1225,7 +1264,7 @@ func (s *splunkScraper) scrapeIndexesBucketCount(ctx context.Context, now pcommo if f.Content.TotalBucketCount != "" { totalBucketCount, err = strconv.ParseInt(f.Content.TotalBucketCount, 10, 64) if err != nil { - errs.Add(err) + errs <- err } } @@ -1234,7 +1273,7 @@ func (s *splunkScraper) scrapeIndexesBucketCount(ctx context.Context, now pcommo } // Scrape indexes extended raw size -func (s *splunkScraper) scrapeIndexesRawSize(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { +func (s *splunkScraper) scrapeIndexesRawSize(ctx context.Context, now pcommon.Timestamp, errs chan error) { if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedRawSize.Enabled || !s.splunkClient.isConfigured(typeIdx) { return } @@ -1246,26 +1285,26 @@ func (s *splunkScraper) scrapeIndexesRawSize(ctx context.Context, now pcommon.Ti req, err := s.splunkClient.createAPIRequest(ctx, ept) if err != nil { - errs.Add(err) + errs <- err return } res, err := s.splunkClient.makeRequest(req) if err != nil { - errs.Add(err) + errs <- err return } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { - errs.Add(err) + errs <- err return } err = json.Unmarshal(body, &it) if err != nil { - errs.Add(err) + errs <- err return } @@ -1279,7 +1318,7 @@ func (s *splunkScraper) scrapeIndexesRawSize(ctx context.Context, now pcommon.Ti mb, err := strconv.ParseFloat(f.Content.TotalRawSize, 64) totalRawSize = int64(mb * 1024 * 1024) if err != nil { - errs.Add(err) + errs <- err } } s.mb.RecordSplunkDataIndexesExtendedRawSizeDataPoint(now, totalRawSize, name) @@ -1287,7 +1326,7 @@ func (s *splunkScraper) scrapeIndexesRawSize(ctx context.Context, now pcommon.Ti } // Scrape indexes extended bucket event count -func (s 
*splunkScraper) scrapeIndexesBucketEventCount(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { +func (s *splunkScraper) scrapeIndexesBucketEventCount(ctx context.Context, now pcommon.Timestamp, errs chan error) { if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedBucketEventCount.Enabled || !s.splunkClient.isConfigured(typeIdx) { return } @@ -1299,26 +1338,26 @@ func (s *splunkScraper) scrapeIndexesBucketEventCount(ctx context.Context, now p req, err := s.splunkClient.createAPIRequest(ctx, ept) if err != nil { - errs.Add(err) + errs <- err return } res, err := s.splunkClient.makeRequest(req) if err != nil { - errs.Add(err) + errs <- err return } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { - errs.Add(err) + errs <- err return } err = json.Unmarshal(body, &it) if err != nil { - errs.Add(err) + errs <- err return } @@ -1333,7 +1372,7 @@ func (s *splunkScraper) scrapeIndexesBucketEventCount(ctx context.Context, now p bucketDir = "cold" bucketEventCount, err = strconv.ParseInt(f.Content.BucketDirs.Cold.EventCount, 10, 64) if err != nil { - errs.Add(err) + errs <- err } s.mb.RecordSplunkDataIndexesExtendedBucketEventCountDataPoint(now, bucketEventCount, name, bucketDir) } @@ -1341,7 +1380,7 @@ func (s *splunkScraper) scrapeIndexesBucketEventCount(ctx context.Context, now p bucketDir = "home" bucketEventCount, err = strconv.ParseInt(f.Content.BucketDirs.Home.EventCount, 10, 64) if err != nil { - errs.Add(err) + errs <- err } s.mb.RecordSplunkDataIndexesExtendedBucketEventCountDataPoint(now, bucketEventCount, name, bucketDir) } @@ -1349,7 +1388,7 @@ func (s *splunkScraper) scrapeIndexesBucketEventCount(ctx context.Context, now p bucketDir = "thawed" bucketEventCount, err = strconv.ParseInt(f.Content.BucketDirs.Thawed.EventCount, 10, 64) if err != nil { - errs.Add(err) + errs <- err } s.mb.RecordSplunkDataIndexesExtendedBucketEventCountDataPoint(now, bucketEventCount, name, bucketDir) } @@ -1357,7 
+1396,7 @@ func (s *splunkScraper) scrapeIndexesBucketEventCount(ctx context.Context, now p } // Scrape indexes extended bucket hot/warm count -func (s *splunkScraper) scrapeIndexesBucketHotWarmCount(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { +func (s *splunkScraper) scrapeIndexesBucketHotWarmCount(ctx context.Context, now pcommon.Timestamp, errs chan error) { if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedBucketHotCount.Enabled || !s.splunkClient.isConfigured(typeIdx) { return } @@ -1369,26 +1408,26 @@ func (s *splunkScraper) scrapeIndexesBucketHotWarmCount(ctx context.Context, now req, err := s.splunkClient.createAPIRequest(ctx, ept) if err != nil { - errs.Add(err) + errs <- err return } res, err := s.splunkClient.makeRequest(req) if err != nil { - errs.Add(err) + errs <- err return } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { - errs.Add(err) + errs <- err return } err = json.Unmarshal(body, &it) if err != nil { - errs.Add(err) + errs <- err return } @@ -1404,7 +1443,7 @@ func (s *splunkScraper) scrapeIndexesBucketHotWarmCount(ctx context.Context, now bucketHotCount, err = strconv.ParseInt(f.Content.BucketDirs.Home.HotBucketCount, 10, 64) bucketDir = "hot" if err != nil { - errs.Add(err) + errs <- err } s.mb.RecordSplunkDataIndexesExtendedBucketHotCountDataPoint(now, bucketHotCount, name, bucketDir) } @@ -1412,7 +1451,7 @@ func (s *splunkScraper) scrapeIndexesBucketHotWarmCount(ctx context.Context, now bucketWarmCount, err = strconv.ParseInt(f.Content.BucketDirs.Home.WarmBucketCount, 10, 64) bucketDir = "warm" if err != nil { - errs.Add(err) + errs <- err } s.mb.RecordSplunkDataIndexesExtendedBucketWarmCountDataPoint(now, bucketWarmCount, name, bucketDir) } @@ -1420,7 +1459,7 @@ func (s *splunkScraper) scrapeIndexesBucketHotWarmCount(ctx context.Context, now } // Scrape introspection queues -func (s *splunkScraper) scrapeIntrospectionQueues(ctx context.Context, now 
pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { +func (s *splunkScraper) scrapeIntrospectionQueues(ctx context.Context, now pcommon.Timestamp, errs chan error) { if !s.conf.MetricsBuilderConfig.Metrics.SplunkServerIntrospectionQueuesCurrent.Enabled || !s.splunkClient.isConfigured(typeIdx) { return } @@ -1432,26 +1471,26 @@ func (s *splunkScraper) scrapeIntrospectionQueues(ctx context.Context, now pcomm req, err := s.splunkClient.createAPIRequest(ctx, ept) if err != nil { - errs.Add(err) + errs <- err return } res, err := s.splunkClient.makeRequest(req) if err != nil { - errs.Add(err) + errs <- err return } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { - errs.Add(err) + errs <- err return } err = json.Unmarshal(body, &it) if err != nil { - errs.Add(err) + errs <- err return } @@ -1468,7 +1507,7 @@ func (s *splunkScraper) scrapeIntrospectionQueues(ctx context.Context, now pcomm } // Scrape introspection queues bytes -func (s *splunkScraper) scrapeIntrospectionQueuesBytes(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { +func (s *splunkScraper) scrapeIntrospectionQueuesBytes(ctx context.Context, now pcommon.Timestamp, errs chan error) { if !s.conf.MetricsBuilderConfig.Metrics.SplunkServerIntrospectionQueuesCurrentBytes.Enabled || !s.splunkClient.isConfigured(typeIdx) { return } @@ -1480,26 +1519,26 @@ func (s *splunkScraper) scrapeIntrospectionQueuesBytes(ctx context.Context, now req, err := s.splunkClient.createAPIRequest(ctx, ept) if err != nil { - errs.Add(err) + errs <- err return } res, err := s.splunkClient.makeRequest(req) if err != nil { - errs.Add(err) + errs <- err return } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { - errs.Add(err) + errs <- err return } err = json.Unmarshal(body, &it) if err != nil { - errs.Add(err) + errs <- err return } var name string diff --git a/receiver/splunkenterprisereceiver/scraper_test.go 
b/receiver/splunkenterprisereceiver/scraper_test.go index 08086a6a3ee1e..42ecd5fe692b2 100644 --- a/receiver/splunkenterprisereceiver/scraper_test.go +++ b/receiver/splunkenterprisereceiver/scraper_test.go @@ -53,9 +53,9 @@ func createMockServer() *httptest.Server { switch r.URL.String() { case "/services/server/introspection/indexer?output_mode=json": mockIndexerThroughput(w, r) - case "/services/data/indexes-extended?output_mode=json&count=-1": + case "/services/data/indexes-extended": mockIndexesExtended(w, r) - case "/services/server/introspection/queues?output_mode=json&count=-1": + case "/services/server/introspection/queues": mockIntrospectionQueues(w, r) default: http.NotFoundHandler().ServeHTTP(w, r) From f544db1dd21b361ffafd0c06014f4270570fdef7 Mon Sep 17 00:00:00 2001 From: shalper2 Date: Tue, 20 Feb 2024 11:26:21 -0600 Subject: [PATCH 17/31] added new metrics and fixed lints --- receiver/splunkenterprisereceiver/scraper.go | 152 ++++++++++--------- 1 file changed, 81 insertions(+), 71 deletions(-) diff --git a/receiver/splunkenterprisereceiver/scraper.go b/receiver/splunkenterprisereceiver/scraper.go index a49daf981f13e..979d02f663577 100644 --- a/receiver/splunkenterprisereceiver/scraper.go +++ b/receiver/splunkenterprisereceiver/scraper.go @@ -54,7 +54,7 @@ func (s *splunkScraper) start(_ context.Context, h component.Host) (err error) { } // listens to the error channel and combines errors sent from different metric scrape functions, -// returning the concatinated error list should context timeout or a nil error value is sent in the +// returning the combined error list should context timeout or a nil error value is sent in the // channel signifying the end of a scrape cycle func errorListener(ctx context.Context, eQueue <-chan error, eOut chan<- *scrapererror.ScrapeErrors) { errs := &scrapererror.ScrapeErrors{} @@ -96,6 +96,16 @@ func (s *splunkScraper) scrape(ctx context.Context) (pmetric.Metrics, error) { s.scrapeIndexesBucketHotWarmCount, 
s.scrapeIntrospectionQueues, s.scrapeIntrospectionQueuesBytes, + s.scrapeAvgExecLatencyByHost, + s.scrapeIndexerPipelineQueues, + s.scrapeBucketsSearchableStatus, + s.scrapeIndexesBucketCountAdHoc, + s.scrapeSchedulerCompletionRatioByHost, + s.scrapeIndexerRawWriteSecondsByHost, + s.scrapeIndexerCPUSecondsByHost, + s.scrapeAvgIopsByHost, + s.scrapeSchedulerRunTimeByHost, + s.scrapeIndexerAvgRate, } go func() { @@ -123,11 +133,12 @@ func (s *splunkScraper) scrape(ctx context.Context) (pmetric.Metrics, error) { // Each metric has its own scrape function associated with it func (s *splunkScraper) scrapeLicenseUsageByIndex(ctx context.Context, now pcommon.Timestamp, errs chan error) { + // Because we have to utilize network resources for each KPI we should check that each metrics + // is enabled before proceeding if !s.conf.MetricsBuilderConfig.Metrics.SplunkLicenseIndexUsage.Enabled || !s.splunkClient.isConfigured(typeCm) { return } ctx = context.WithValue(ctx, endpointType("type"), typeCm) - sr := searchResponse{ search: searchDict[`SplunkLicenseIndexUsageSearch`], } @@ -194,13 +205,12 @@ func (s *splunkScraper) scrapeLicenseUsageByIndex(ctx context.Context, now pcomm } } -func (s *splunkScraper) scrapeAvgExecLatencyByHost(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { +func (s *splunkScraper) scrapeAvgExecLatencyByHost(ctx context.Context, now pcommon.Timestamp, errs chan error) { // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding if !s.conf.MetricsBuilderConfig.Metrics.SplunkSchedulerAvgExecutionLatency.Enabled { return } - sr := searchResponse{ search: searchDict[`SplunkSchedulerAvgExecLatencySearch`], } @@ -217,20 +227,20 @@ func (s *splunkScraper) scrapeAvgExecLatencyByHost(ctx context.Context, now pcom for { req, err = s.splunkClient.createRequest(ctx, &sr) if err != nil { - errs.Add(err) + errs <- err return } res, err = s.splunkClient.makeRequest(req) 
if err != nil { - errs.Add(err) + errs <- err return } // if its a 204 the body will be empty because we are still waiting on search results err = unmarshallSearchReq(res, &sr) if err != nil { - errs.Add(err) + errs <- err } res.Body.Close() @@ -249,7 +259,7 @@ func (s *splunkScraper) scrapeAvgExecLatencyByHost(ctx context.Context, now pcom } if time.Since(start) > s.conf.ScraperControllerSettings.Timeout { - errs.Add(errMaxSearchWaitTimeExceeded) + errs <- errMaxSearchWaitTimeExceeded return } } @@ -264,7 +274,7 @@ func (s *splunkScraper) scrapeAvgExecLatencyByHost(ctx context.Context, now pcom case "latency_avg_exec": v, err := strconv.ParseFloat(f.Value, 64) if err != nil { - errs.Add(err) + errs <- err continue } s.mb.RecordSplunkSchedulerAvgExecutionLatencyDataPoint(now, v, host) @@ -272,7 +282,7 @@ func (s *splunkScraper) scrapeAvgExecLatencyByHost(ctx context.Context, now pcom } } -func (s *splunkScraper) scrapeIndexerAvgRate(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { +func (s *splunkScraper) scrapeIndexerAvgRate(ctx context.Context, now pcommon.Timestamp, errs chan error) { // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding if !s.conf.MetricsBuilderConfig.Metrics.SplunkIndexerAvgRate.Enabled { @@ -295,20 +305,20 @@ func (s *splunkScraper) scrapeIndexerAvgRate(ctx context.Context, now pcommon.Ti for { req, err = s.splunkClient.createRequest(ctx, &sr) if err != nil { - errs.Add(err) + errs <- err return } res, err = s.splunkClient.makeRequest(req) if err != nil { - errs.Add(err) + errs <- err return } // if its a 204 the body will be empty because we are still waiting on search results err = unmarshallSearchReq(res, &sr) if err != nil { - errs.Add(err) + errs <- err } res.Body.Close() @@ -331,7 +341,7 @@ func (s *splunkScraper) scrapeIndexerAvgRate(ctx context.Context, now pcommon.Ti } if time.Since(start) > s.conf.ScraperControllerSettings.Timeout 
{ - errs.Add(errMaxSearchWaitTimeExceeded) + errs <- errMaxSearchWaitTimeExceeded return } } @@ -345,7 +355,7 @@ func (s *splunkScraper) scrapeIndexerAvgRate(ctx context.Context, now pcommon.Ti case "indexer_avg_kbps": v, err := strconv.ParseFloat(f.Value, 64) if err != nil { - errs.Add(err) + errs <- err continue } s.mb.RecordSplunkIndexerAvgRateDataPoint(now, v, host) @@ -353,7 +363,7 @@ func (s *splunkScraper) scrapeIndexerAvgRate(ctx context.Context, now pcommon.Ti } } -func (s *splunkScraper) scrapeIndexerPipelineQueues(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { +func (s *splunkScraper) scrapeIndexerPipelineQueues(ctx context.Context, now pcommon.Timestamp, errs chan error) { // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding if !s.conf.MetricsBuilderConfig.Metrics.SplunkAggregationQueueRatio.Enabled { @@ -376,20 +386,20 @@ func (s *splunkScraper) scrapeIndexerPipelineQueues(ctx context.Context, now pco for { req, err = s.splunkClient.createRequest(ctx, &sr) if err != nil { - errs.Add(err) + errs <- err return } res, err = s.splunkClient.makeRequest(req) if err != nil { - errs.Add(err) + errs <- err return } // if its a 204 the body will be empty because we are still waiting on search results err = unmarshallSearchReq(res, &sr) if err != nil { - errs.Add(err) + errs <- err } res.Body.Close() @@ -413,7 +423,7 @@ func (s *splunkScraper) scrapeIndexerPipelineQueues(ctx context.Context, now pco } if time.Since(start) > s.conf.ScraperControllerSettings.Timeout { - errs.Add(errMaxSearchWaitTimeExceeded) + errs <- errMaxSearchWaitTimeExceeded return } @@ -429,21 +439,21 @@ func (s *splunkScraper) scrapeIndexerPipelineQueues(ctx context.Context, now pco case "agg_queue_ratio": v, err := strconv.ParseFloat(f.Value, 64) if err != nil { - errs.Add(err) + errs <- err continue } s.mb.RecordSplunkAggregationQueueRatioDataPoint(now, v, host) case 
"index_queue_ratio": v, err := strconv.ParseFloat(f.Value, 64) if err != nil { - errs.Add(err) + errs <- err continue } s.mb.RecordSplunkIndexerQueueRatioDataPoint(now, v, host) case "parse_queue_ratio": v, err := strconv.ParseFloat(f.Value, 64) if err != nil { - errs.Add(err) + errs <- err continue } s.mb.RecordSplunkParseQueueRatioDataPoint(now, v, host) @@ -451,14 +461,14 @@ func (s *splunkScraper) scrapeIndexerPipelineQueues(ctx context.Context, now pco v, err := strconv.ParseInt(f.Value, 10, 64) ps = v if err != nil { - errs.Add(err) + errs <- err continue } s.mb.RecordSplunkPipelineSetCountDataPoint(now, ps, host) case "typing_queue_ratio": v, err := strconv.ParseFloat(f.Value, 64) if err != nil { - errs.Add(err) + errs <- err continue } s.mb.RecordSplunkTypingQueueRatioDataPoint(now, v, host) @@ -466,7 +476,7 @@ func (s *splunkScraper) scrapeIndexerPipelineQueues(ctx context.Context, now pco } } -func (s *splunkScraper) scrapeBucketsSearchableStatus(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { +func (s *splunkScraper) scrapeBucketsSearchableStatus(ctx context.Context, now pcommon.Timestamp, errs chan error) { // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding if !s.conf.MetricsBuilderConfig.Metrics.SplunkBucketsSearchableStatus.Enabled { @@ -489,20 +499,20 @@ func (s *splunkScraper) scrapeBucketsSearchableStatus(ctx context.Context, now p for { req, err = s.splunkClient.createRequest(ctx, &sr) if err != nil { - errs.Add(err) + errs <- err return } res, err = s.splunkClient.makeRequest(req) if err != nil { - errs.Add(err) + errs <- err return } // if its a 204 the body will be empty because we are still waiting on search results err = unmarshallSearchReq(res, &sr) if err != nil { - errs.Add(err) + errs <- err } res.Body.Close() @@ -526,7 +536,7 @@ func (s *splunkScraper) scrapeBucketsSearchableStatus(ctx context.Context, now p } if time.Since(start) > 
s.conf.ScraperControllerSettings.Timeout { - errs.Add(errMaxSearchWaitTimeExceeded) + errs <- errMaxSearchWaitTimeExceeded return } } @@ -546,7 +556,7 @@ func (s *splunkScraper) scrapeBucketsSearchableStatus(ctx context.Context, now p v, err := strconv.ParseInt(f.Value, 10, 64) bc = v if err != nil { - errs.Add(err) + errs <- err continue } s.mb.RecordSplunkBucketsSearchableStatusDataPoint(now, bc, host, searchable) @@ -554,7 +564,7 @@ func (s *splunkScraper) scrapeBucketsSearchableStatus(ctx context.Context, now p } } -func (s *splunkScraper) scrapeIndexesBucketCountAdHoc(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { +func (s *splunkScraper) scrapeIndexesBucketCountAdHoc(ctx context.Context, now pcommon.Timestamp, errs chan error) { // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding if !s.conf.MetricsBuilderConfig.Metrics.SplunkIndexesSize.Enabled { @@ -577,20 +587,20 @@ func (s *splunkScraper) scrapeIndexesBucketCountAdHoc(ctx context.Context, now p for { req, err = s.splunkClient.createRequest(ctx, &sr) if err != nil { - errs.Add(err) + errs <- err return } res, err = s.splunkClient.makeRequest(req) if err != nil { - errs.Add(err) + errs <- err return } // if its a 204 the body will be empty because we are still waiting on search results err = unmarshallSearchReq(res, &sr) if err != nil { - errs.Add(err) + errs <- err } res.Body.Close() @@ -614,7 +624,7 @@ func (s *splunkScraper) scrapeIndexesBucketCountAdHoc(ctx context.Context, now p } if time.Since(start) > s.conf.ScraperControllerSettings.Timeout { - errs.Add(errMaxSearchWaitTimeExceeded) + errs <- errMaxSearchWaitTimeExceeded return } } @@ -629,21 +639,21 @@ func (s *splunkScraper) scrapeIndexesBucketCountAdHoc(ctx context.Context, now p case "total_size_gb": v, err := strconv.ParseFloat(f.Value, 64) if err != nil { - errs.Add(err) + errs <- err continue } s.mb.RecordSplunkIndexesSizeDataPoint(now, 
v, indexer) case "average_size_gb": v, err := strconv.ParseFloat(f.Value, 64) if err != nil { - errs.Add(err) + errs <- err continue } s.mb.RecordSplunkIndexesAvgSizeDataPoint(now, v, indexer) case "average_usage_perc": v, err := strconv.ParseFloat(f.Value, 64) if err != nil { - errs.Add(err) + errs <- err continue } s.mb.RecordSplunkIndexesAvgUsageDataPoint(now, v, indexer) @@ -651,7 +661,7 @@ func (s *splunkScraper) scrapeIndexesBucketCountAdHoc(ctx context.Context, now p v, err := strconv.ParseInt(f.Value, 10, 64) bc = v if err != nil { - errs.Add(err) + errs <- err continue } s.mb.RecordSplunkIndexesMedianDataAgeDataPoint(now, bc, indexer) @@ -659,7 +669,7 @@ func (s *splunkScraper) scrapeIndexesBucketCountAdHoc(ctx context.Context, now p v, err := strconv.ParseInt(f.Value, 10, 64) bc = v if err != nil { - errs.Add(err) + errs <- err continue } s.mb.RecordSplunkIndexesBucketCountDataPoint(now, bc, indexer) @@ -667,7 +677,7 @@ func (s *splunkScraper) scrapeIndexesBucketCountAdHoc(ctx context.Context, now p } } -func (s *splunkScraper) scrapeSchedulerCompletionRatioByHost(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { +func (s *splunkScraper) scrapeSchedulerCompletionRatioByHost(ctx context.Context, now pcommon.Timestamp, errs chan error) { // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding if !s.conf.MetricsBuilderConfig.Metrics.SplunkSchedulerCompletionRatio.Enabled { @@ -690,20 +700,20 @@ func (s *splunkScraper) scrapeSchedulerCompletionRatioByHost(ctx context.Context for { req, err = s.splunkClient.createRequest(ctx, &sr) if err != nil { - errs.Add(err) + errs <- err return } res, err = s.splunkClient.makeRequest(req) if err != nil { - errs.Add(err) + errs <- err return } // if its a 204 the body will be empty because we are still waiting on search results err = unmarshallSearchReq(res, &sr) if err != nil { - errs.Add(err) + errs <- err } 
res.Body.Close() @@ -722,7 +732,7 @@ func (s *splunkScraper) scrapeSchedulerCompletionRatioByHost(ctx context.Context } if time.Since(start) > s.conf.ScraperControllerSettings.Timeout { - errs.Add(errMaxSearchWaitTimeExceeded) + errs <- errMaxSearchWaitTimeExceeded return } } @@ -737,7 +747,7 @@ func (s *splunkScraper) scrapeSchedulerCompletionRatioByHost(ctx context.Context case "completion_ratio": v, err := strconv.ParseFloat(f.Value, 64) if err != nil { - errs.Add(err) + errs <- err continue } s.mb.RecordSplunkSchedulerCompletionRatioDataPoint(now, v, host) @@ -745,7 +755,7 @@ func (s *splunkScraper) scrapeSchedulerCompletionRatioByHost(ctx context.Context } } -func (s *splunkScraper) scrapeIndexerRawWriteSecondsByHost(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { +func (s *splunkScraper) scrapeIndexerRawWriteSecondsByHost(ctx context.Context, now pcommon.Timestamp, errs chan error) { // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding if !s.conf.MetricsBuilderConfig.Metrics.SplunkIndexerRawWriteTime.Enabled { @@ -768,20 +778,20 @@ func (s *splunkScraper) scrapeIndexerRawWriteSecondsByHost(ctx context.Context, for { req, err = s.splunkClient.createRequest(ctx, &sr) if err != nil { - errs.Add(err) + errs <- err return } res, err = s.splunkClient.makeRequest(req) if err != nil { - errs.Add(err) + errs <- err return } // if its a 204 the body will be empty because we are still waiting on search results err = unmarshallSearchReq(res, &sr) if err != nil { - errs.Add(err) + errs <- err } res.Body.Close() @@ -800,7 +810,7 @@ func (s *splunkScraper) scrapeIndexerRawWriteSecondsByHost(ctx context.Context, } if time.Since(start) > s.conf.ScraperControllerSettings.Timeout { - errs.Add(errMaxSearchWaitTimeExceeded) + errs <- errMaxSearchWaitTimeExceeded return } } @@ -815,7 +825,7 @@ func (s *splunkScraper) scrapeIndexerRawWriteSecondsByHost(ctx context.Context, case 
"raw_data_write_seconds": v, err := strconv.ParseFloat(f.Value, 64) if err != nil { - errs.Add(err) + errs <- err continue } s.mb.RecordSplunkIndexerRawWriteTimeDataPoint(now, v, host) @@ -823,7 +833,7 @@ func (s *splunkScraper) scrapeIndexerRawWriteSecondsByHost(ctx context.Context, } } -func (s *splunkScraper) scrapeIndexerCPUSecondsByHost(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { +func (s *splunkScraper) scrapeIndexerCPUSecondsByHost(ctx context.Context, now pcommon.Timestamp, errs chan error) { // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding if !s.conf.MetricsBuilderConfig.Metrics.SplunkIndexerCPUTime.Enabled { @@ -846,20 +856,20 @@ func (s *splunkScraper) scrapeIndexerCPUSecondsByHost(ctx context.Context, now p for { req, err = s.splunkClient.createRequest(ctx, &sr) if err != nil { - errs.Add(err) + errs <- err return } res, err = s.splunkClient.makeRequest(req) if err != nil { - errs.Add(err) + errs <- err return } // if its a 204 the body will be empty because we are still waiting on search results err = unmarshallSearchReq(res, &sr) if err != nil { - errs.Add(err) + errs <- err } res.Body.Close() @@ -878,7 +888,7 @@ func (s *splunkScraper) scrapeIndexerCPUSecondsByHost(ctx context.Context, now p } if time.Since(start) > s.conf.ScraperControllerSettings.Timeout { - errs.Add(errMaxSearchWaitTimeExceeded) + errs <- errMaxSearchWaitTimeExceeded return } } @@ -893,7 +903,7 @@ func (s *splunkScraper) scrapeIndexerCPUSecondsByHost(ctx context.Context, now p case "service_cpu_seconds": v, err := strconv.ParseFloat(f.Value, 64) if err != nil { - errs.Add(err) + errs <- err continue } s.mb.RecordSplunkIndexerCPUTimeDataPoint(now, v, host) @@ -901,7 +911,7 @@ func (s *splunkScraper) scrapeIndexerCPUSecondsByHost(ctx context.Context, now p } } -func (s *splunkScraper) scrapeAvgIopsByHost(ctx context.Context, now pcommon.Timestamp, errs 
*scrapererror.ScrapeErrors) { +func (s *splunkScraper) scrapeAvgIopsByHost(ctx context.Context, now pcommon.Timestamp, errs chan error) { // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding if !s.conf.MetricsBuilderConfig.Metrics.SplunkIoAvgIops.Enabled { @@ -924,20 +934,20 @@ func (s *splunkScraper) scrapeAvgIopsByHost(ctx context.Context, now pcommon.Tim for { req, err = s.splunkClient.createRequest(ctx, &sr) if err != nil { - errs.Add(err) + errs <- err return } res, err = s.splunkClient.makeRequest(req) if err != nil { - errs.Add(err) + errs <- err return } // if its a 204 the body will be empty because we are still waiting on search results err = unmarshallSearchReq(res, &sr) if err != nil { - errs.Add(err) + errs <- err } res.Body.Close() @@ -956,7 +966,7 @@ func (s *splunkScraper) scrapeAvgIopsByHost(ctx context.Context, now pcommon.Tim } if time.Since(start) > s.conf.ScraperControllerSettings.Timeout { - errs.Add(errMaxSearchWaitTimeExceeded) + errs <- errMaxSearchWaitTimeExceeded return } } @@ -971,7 +981,7 @@ func (s *splunkScraper) scrapeAvgIopsByHost(ctx context.Context, now pcommon.Tim case "iops": v, err := strconv.ParseInt(f.Value, 10, 64) if err != nil { - errs.Add(err) + errs <- err continue } s.mb.RecordSplunkIoAvgIopsDataPoint(now, v, host) @@ -979,7 +989,7 @@ func (s *splunkScraper) scrapeAvgIopsByHost(ctx context.Context, now pcommon.Tim } } -func (s *splunkScraper) scrapeSchedulerRunTimeByHost(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { +func (s *splunkScraper) scrapeSchedulerRunTimeByHost(ctx context.Context, now pcommon.Timestamp, errs chan error) { // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding if !s.conf.MetricsBuilderConfig.Metrics.SplunkSchedulerAvgRunTime.Enabled { @@ -1002,20 +1012,20 @@ func (s *splunkScraper) scrapeSchedulerRunTimeByHost(ctx 
context.Context, now pc for { req, err = s.splunkClient.createRequest(ctx, &sr) if err != nil { - errs.Add(err) + errs <- err return } res, err = s.splunkClient.makeRequest(req) if err != nil { - errs.Add(err) + errs <- err return } // if its a 204 the body will be empty because we are still waiting on search results err = unmarshallSearchReq(res, &sr) if err != nil { - errs.Add(err) + errs <- err } res.Body.Close() @@ -1034,7 +1044,7 @@ func (s *splunkScraper) scrapeSchedulerRunTimeByHost(ctx context.Context, now pc } if time.Since(start) > s.conf.ScraperControllerSettings.Timeout { - errs.Add(errMaxSearchWaitTimeExceeded) + errs <- errMaxSearchWaitTimeExceeded return } } @@ -1049,7 +1059,7 @@ func (s *splunkScraper) scrapeSchedulerRunTimeByHost(ctx context.Context, now pc case "run_time_avg": v, err := strconv.ParseFloat(f.Value, 64) if err != nil { - errs.Add(err) + errs <- err continue } s.mb.RecordSplunkSchedulerAvgRunTimeDataPoint(now, v, host) From 07481d569eb9b0aeeeb13da19ffb23bc25f68a34 Mon Sep 17 00:00:00 2001 From: shalper2 Date: Tue, 20 Feb 2024 11:40:25 -0600 Subject: [PATCH 18/31] fixed scraper test --- receiver/splunkenterprisereceiver/scraper_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/receiver/splunkenterprisereceiver/scraper_test.go b/receiver/splunkenterprisereceiver/scraper_test.go index 42ecd5fe692b2..08086a6a3ee1e 100644 --- a/receiver/splunkenterprisereceiver/scraper_test.go +++ b/receiver/splunkenterprisereceiver/scraper_test.go @@ -53,9 +53,9 @@ func createMockServer() *httptest.Server { switch r.URL.String() { case "/services/server/introspection/indexer?output_mode=json": mockIndexerThroughput(w, r) - case "/services/data/indexes-extended": + case "/services/data/indexes-extended?output_mode=json&count=-1": mockIndexesExtended(w, r) - case "/services/server/introspection/queues": + case "/services/server/introspection/queues?output_mode=json&count=-1": mockIntrospectionQueues(w, r) default: 
http.NotFoundHandler().ServeHTTP(w, r) From 95819ef1fbe2456017e413df3919ec515afdcf15 Mon Sep 17 00:00:00 2001 From: shalper2 Date: Thu, 18 Jan 2024 13:34:58 -0600 Subject: [PATCH 19/31] updated client/scraper --- receiver/splunkenterprisereceiver/config.go | 1 + .../internal/metadata/generated_config.go | 2 +- receiver/splunkenterprisereceiver/scraper.go | 4 ++-- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/receiver/splunkenterprisereceiver/config.go b/receiver/splunkenterprisereceiver/config.go index f9ac3488aa65a..a5ce13dd6b11f 100644 --- a/receiver/splunkenterprisereceiver/config.go +++ b/receiver/splunkenterprisereceiver/config.go @@ -16,6 +16,7 @@ import ( ) var ( + errUnspecifiedEndpoint = errors.New("endpoint to an unspecified node type") errBadOrMissingEndpoint = errors.New("missing a valid endpoint") errBadScheme = errors.New("endpoint scheme must be either http or https") errMissingAuthExtension = errors.New("auth extension missing from config") diff --git a/receiver/splunkenterprisereceiver/internal/metadata/generated_config.go b/receiver/splunkenterprisereceiver/internal/metadata/generated_config.go index 5b80b4fb6dcc1..7e9248ece59b9 100644 --- a/receiver/splunkenterprisereceiver/internal/metadata/generated_config.go +++ b/receiver/splunkenterprisereceiver/internal/metadata/generated_config.go @@ -15,7 +15,7 @@ func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error { if parser == nil { return nil } - err := parser.Unmarshal(ms) + err := parser.Unmarshal(ms, confmap.WithErrorUnused()) if err != nil { return err } diff --git a/receiver/splunkenterprisereceiver/scraper.go b/receiver/splunkenterprisereceiver/scraper.go index e1f4f229de044..254493464fcd8 100644 --- a/receiver/splunkenterprisereceiver/scraper.go +++ b/receiver/splunkenterprisereceiver/scraper.go @@ -44,8 +44,8 @@ func newSplunkMetricsScraper(params receiver.CreateSettings, cfg *Config) splunk } // Create a client instance and add to the splunkScraper -func (s 
*splunkScraper) start(_ context.Context, h component.Host) (err error) { - client, err := newSplunkEntClient(s.conf, h, s.settings) +func (s *splunkScraper) start(ctx context.Context, h component.Host) (err error) { + client, err := newSplunkEntClient(ctx, s.conf, h, s.settings) if err != nil { return err } From 4912fe88bce0019c62962d1ab953ac74968148c8 Mon Sep 17 00:00:00 2001 From: shalper2 Date: Thu, 18 Jan 2024 16:55:20 -0600 Subject: [PATCH 20/31] refactored client --- receiver/splunkenterprisereceiver/client.go | 30 +++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/receiver/splunkenterprisereceiver/client.go b/receiver/splunkenterprisereceiver/client.go index 6c0bb419f7663..d0ece4f351e34 100644 --- a/receiver/splunkenterprisereceiver/client.go +++ b/receiver/splunkenterprisereceiver/client.go @@ -24,12 +24,16 @@ const ( var ( errCtxMissingEndpointType = errors.New("context was passed without the endpoint type included") errEndpointTypeNotFound = errors.New("requested client is not configured and could not be found in splunkEntClient") +<<<<<<< HEAD errNoClientFound = errors.New("no client corresponding to the endpoint type was found") +======= +>>>>>>> d22d920047 (refactored client) ) // Type wrapper for accessing context value type endpointType string +<<<<<<< HEAD // Wrapper around splunkClientMap to avoid awkward reference/dereference stuff that arises when using maps in golang type splunkEntClient struct { clients splunkClientMap @@ -51,6 +55,24 @@ func newSplunkEntClient(cfg *Config, h component.Host, s component.TelemetrySett var e *url.URL var c *http.Client clientMap := make(splunkClientMap) +======= +// The splunkEntClient is made up of a number of splunkClients defined for each configured endpoint +type splunkEntClient map[any]*splunkClient + +// The client does not carry the endpoint that is configured with it and golang does not support mixed +// type arrays so this struct contains the pair: the client configured for the endpoint 
and the endpoint +// itself +type splunkClient struct { + client *http.Client + endpoint *url.URL +} + +func newSplunkEntClient(ctx context.Context, cfg *Config, h component.Host, s component.TelemetrySettings) (*splunkEntClient, error) { + var err error + var e *url.URL + var c *http.Client + splunkEntClient := make(splunkEntClient) +>>>>>>> d22d920047 (refactored client) // if the endpoint is defined, put it in the endpoints map for later use // we already checked that url.Parse does not fail in cfg.Validate() @@ -60,7 +82,11 @@ func newSplunkEntClient(cfg *Config, h component.Host, s component.TelemetrySett if err != nil { return nil, err } +<<<<<<< HEAD clientMap[typeIdx] = splunkClient{ +======= + splunkEntClient[typeIdx] = &splunkClient{ +>>>>>>> d22d920047 (refactored client) client: c, endpoint: e, } @@ -71,7 +97,11 @@ func newSplunkEntClient(cfg *Config, h component.Host, s component.TelemetrySett if err != nil { return nil, err } +<<<<<<< HEAD clientMap[typeSh] = splunkClient{ +======= + splunkEntClient[typeSh] = &splunkClient{ +>>>>>>> d22d920047 (refactored client) client: c, endpoint: e, } From 30d0cde3c9c35fe6071bad9e20519dc56184c414 Mon Sep 17 00:00:00 2001 From: shalper2 Date: Mon, 22 Jan 2024 15:30:33 -0600 Subject: [PATCH 21/31] fixed tests --- receiver/splunkenterprisereceiver/client.go | 30 -------------------- receiver/splunkenterprisereceiver/scraper.go | 4 +-- 2 files changed, 2 insertions(+), 32 deletions(-) diff --git a/receiver/splunkenterprisereceiver/client.go b/receiver/splunkenterprisereceiver/client.go index d0ece4f351e34..6c0bb419f7663 100644 --- a/receiver/splunkenterprisereceiver/client.go +++ b/receiver/splunkenterprisereceiver/client.go @@ -24,16 +24,12 @@ const ( var ( errCtxMissingEndpointType = errors.New("context was passed without the endpoint type included") errEndpointTypeNotFound = errors.New("requested client is not configured and could not be found in splunkEntClient") -<<<<<<< HEAD errNoClientFound = errors.New("no 
client corresponding to the endpoint type was found") -======= ->>>>>>> d22d920047 (refactored client) ) // Type wrapper for accessing context value type endpointType string -<<<<<<< HEAD // Wrapper around splunkClientMap to avoid awkward reference/dereference stuff that arises when using maps in golang type splunkEntClient struct { clients splunkClientMap @@ -55,24 +51,6 @@ func newSplunkEntClient(cfg *Config, h component.Host, s component.TelemetrySett var e *url.URL var c *http.Client clientMap := make(splunkClientMap) -======= -// The splunkEntClient is made up of a number of splunkClients defined for each configured endpoint -type splunkEntClient map[any]*splunkClient - -// The client does not carry the endpoint that is configured with it and golang does not support mixed -// type arrays so this struct contains the pair: the client configured for the endpoint and the endpoint -// itself -type splunkClient struct { - client *http.Client - endpoint *url.URL -} - -func newSplunkEntClient(ctx context.Context, cfg *Config, h component.Host, s component.TelemetrySettings) (*splunkEntClient, error) { - var err error - var e *url.URL - var c *http.Client - splunkEntClient := make(splunkEntClient) ->>>>>>> d22d920047 (refactored client) // if the endpoint is defined, put it in the endpoints map for later use // we already checked that url.Parse does not fail in cfg.Validate() @@ -82,11 +60,7 @@ func newSplunkEntClient(ctx context.Context, cfg *Config, h component.Host, s co if err != nil { return nil, err } -<<<<<<< HEAD clientMap[typeIdx] = splunkClient{ -======= - splunkEntClient[typeIdx] = &splunkClient{ ->>>>>>> d22d920047 (refactored client) client: c, endpoint: e, } @@ -97,11 +71,7 @@ func newSplunkEntClient(ctx context.Context, cfg *Config, h component.Host, s co if err != nil { return nil, err } -<<<<<<< HEAD clientMap[typeSh] = splunkClient{ -======= - splunkEntClient[typeSh] = &splunkClient{ ->>>>>>> d22d920047 (refactored client) client: c, endpoint: e, } 
diff --git a/receiver/splunkenterprisereceiver/scraper.go b/receiver/splunkenterprisereceiver/scraper.go index 254493464fcd8..e1f4f229de044 100644 --- a/receiver/splunkenterprisereceiver/scraper.go +++ b/receiver/splunkenterprisereceiver/scraper.go @@ -44,8 +44,8 @@ func newSplunkMetricsScraper(params receiver.CreateSettings, cfg *Config) splunk } // Create a client instance and add to the splunkScraper -func (s *splunkScraper) start(ctx context.Context, h component.Host) (err error) { - client, err := newSplunkEntClient(ctx, s.conf, h, s.settings) +func (s *splunkScraper) start(_ context.Context, h component.Host) (err error) { + client, err := newSplunkEntClient(s.conf, h, s.settings) if err != nil { return err } From e45c9eaeccdb6065a987c2b1a679a48eb50683a7 Mon Sep 17 00:00:00 2001 From: shalper2 Date: Tue, 30 Jan 2024 10:16:04 -0600 Subject: [PATCH 22/31] added check for unconfigured endpoint in scrape --- receiver/splunkenterprisereceiver/client.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/receiver/splunkenterprisereceiver/client.go b/receiver/splunkenterprisereceiver/client.go index 6c0bb419f7663..73f7da474a922 100644 --- a/receiver/splunkenterprisereceiver/client.go +++ b/receiver/splunkenterprisereceiver/client.go @@ -183,3 +183,10 @@ func (c *splunkEntClient) isConfigured(v string) bool { _, ok := c.clients[v] return ok } + +// Check if the splunkEntClient contains a configured endpoint for the type of scraper +// Returns true if an entry exists, false if not. 
+func (c *splunkEntClient) isConfigured(v string) bool { + _, ok := (*c)[v] + return ok +} From cd514b2842d2f98b747799c9ff8544e42f02c114 Mon Sep 17 00:00:00 2001 From: shalper2 Date: Tue, 30 Jan 2024 11:43:58 -0600 Subject: [PATCH 23/31] fixed types for scrapes --- receiver/splunkenterprisereceiver/scraper.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/receiver/splunkenterprisereceiver/scraper.go b/receiver/splunkenterprisereceiver/scraper.go index e1f4f229de044..890761a0a2adc 100644 --- a/receiver/splunkenterprisereceiver/scraper.go +++ b/receiver/splunkenterprisereceiver/scraper.go @@ -1096,7 +1096,7 @@ func (s *splunkScraper) scrapeIndexThroughput(ctx context.Context, now pcommon.T return } - ctx = context.WithValue(ctx, endpointType("type"), typeIdx) + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var it indexThroughput ept := apiDict[`SplunkIndexerThroughput`] @@ -1137,7 +1137,7 @@ func (s *splunkScraper) scrapeIndexesTotalSize(ctx context.Context, now pcommon. 
return } - ctx = context.WithValue(ctx, endpointType("type"), typeIdx) + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var it IndexesExtended ept := apiDict[`SplunkDataIndexesExtended`] @@ -1190,7 +1190,7 @@ func (s *splunkScraper) scrapeIndexesEventCount(ctx context.Context, now pcommon return } - ctx = context.WithValue(ctx, endpointType("type"), typeIdx) + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var it IndexesExtended ept := apiDict[`SplunkDataIndexesExtended`] @@ -1237,7 +1237,7 @@ func (s *splunkScraper) scrapeIndexesBucketCount(ctx context.Context, now pcommo return } - ctx = context.WithValue(ctx, endpointType("type"), typeIdx) + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var it IndexesExtended ept := apiDict[`SplunkDataIndexesExtended`] @@ -1290,7 +1290,7 @@ func (s *splunkScraper) scrapeIndexesRawSize(ctx context.Context, now pcommon.Ti return } - ctx = context.WithValue(ctx, endpointType("type"), typeIdx) + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var it IndexesExtended ept := apiDict[`SplunkDataIndexesExtended`] @@ -1343,7 +1343,7 @@ func (s *splunkScraper) scrapeIndexesBucketEventCount(ctx context.Context, now p return } - ctx = context.WithValue(ctx, endpointType("type"), typeIdx) + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var it IndexesExtended ept := apiDict[`SplunkDataIndexesExtended`] @@ -1413,7 +1413,7 @@ func (s *splunkScraper) scrapeIndexesBucketHotWarmCount(ctx context.Context, now return } - ctx = context.WithValue(ctx, endpointType("type"), typeIdx) + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var it IndexesExtended ept := apiDict[`SplunkDataIndexesExtended`] @@ -1476,7 +1476,7 @@ func (s *splunkScraper) scrapeIntrospectionQueues(ctx context.Context, now pcomm return } - ctx = context.WithValue(ctx, endpointType("type"), typeIdx) + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var it IntrospectionQueues ept := 
apiDict[`SplunkIntrospectionQueues`] @@ -1524,7 +1524,7 @@ func (s *splunkScraper) scrapeIntrospectionQueuesBytes(ctx context.Context, now return } - ctx = context.WithValue(ctx, endpointType("type"), typeIdx) + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var it IntrospectionQueues ept := apiDict[`SplunkIntrospectionQueues`] From a03114442bc032e6b2d6fde6029550ed494f4469 Mon Sep 17 00:00:00 2001 From: shalper2 Date: Wed, 31 Jan 2024 09:41:48 -0600 Subject: [PATCH 24/31] metric endpoint types --- receiver/splunkenterprisereceiver/scraper.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/receiver/splunkenterprisereceiver/scraper.go b/receiver/splunkenterprisereceiver/scraper.go index 890761a0a2adc..e1f4f229de044 100644 --- a/receiver/splunkenterprisereceiver/scraper.go +++ b/receiver/splunkenterprisereceiver/scraper.go @@ -1096,7 +1096,7 @@ func (s *splunkScraper) scrapeIndexThroughput(ctx context.Context, now pcommon.T return } - ctx = context.WithValue(ctx, endpointType("type"), typeCm) + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it indexThroughput ept := apiDict[`SplunkIndexerThroughput`] @@ -1137,7 +1137,7 @@ func (s *splunkScraper) scrapeIndexesTotalSize(ctx context.Context, now pcommon. 
return } - ctx = context.WithValue(ctx, endpointType("type"), typeCm) + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IndexesExtended ept := apiDict[`SplunkDataIndexesExtended`] @@ -1190,7 +1190,7 @@ func (s *splunkScraper) scrapeIndexesEventCount(ctx context.Context, now pcommon return } - ctx = context.WithValue(ctx, endpointType("type"), typeCm) + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IndexesExtended ept := apiDict[`SplunkDataIndexesExtended`] @@ -1237,7 +1237,7 @@ func (s *splunkScraper) scrapeIndexesBucketCount(ctx context.Context, now pcommo return } - ctx = context.WithValue(ctx, endpointType("type"), typeCm) + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IndexesExtended ept := apiDict[`SplunkDataIndexesExtended`] @@ -1290,7 +1290,7 @@ func (s *splunkScraper) scrapeIndexesRawSize(ctx context.Context, now pcommon.Ti return } - ctx = context.WithValue(ctx, endpointType("type"), typeCm) + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IndexesExtended ept := apiDict[`SplunkDataIndexesExtended`] @@ -1343,7 +1343,7 @@ func (s *splunkScraper) scrapeIndexesBucketEventCount(ctx context.Context, now p return } - ctx = context.WithValue(ctx, endpointType("type"), typeCm) + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IndexesExtended ept := apiDict[`SplunkDataIndexesExtended`] @@ -1413,7 +1413,7 @@ func (s *splunkScraper) scrapeIndexesBucketHotWarmCount(ctx context.Context, now return } - ctx = context.WithValue(ctx, endpointType("type"), typeCm) + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IndexesExtended ept := apiDict[`SplunkDataIndexesExtended`] @@ -1476,7 +1476,7 @@ func (s *splunkScraper) scrapeIntrospectionQueues(ctx context.Context, now pcomm return } - ctx = context.WithValue(ctx, endpointType("type"), typeCm) + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IntrospectionQueues ept := 
apiDict[`SplunkIntrospectionQueues`] @@ -1524,7 +1524,7 @@ func (s *splunkScraper) scrapeIntrospectionQueuesBytes(ctx context.Context, now return } - ctx = context.WithValue(ctx, endpointType("type"), typeCm) + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) var it IntrospectionQueues ept := apiDict[`SplunkIntrospectionQueues`] From ec75ac25b724a4671510bbc213247f1ca8906b85 Mon Sep 17 00:00:00 2001 From: shalper2 Date: Tue, 13 Feb 2024 09:51:24 -0600 Subject: [PATCH 25/31] modified client to avoid pointer party --- receiver/splunkenterprisereceiver/client.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/receiver/splunkenterprisereceiver/client.go b/receiver/splunkenterprisereceiver/client.go index 73f7da474a922..056dd9cddf25d 100644 --- a/receiver/splunkenterprisereceiver/client.go +++ b/receiver/splunkenterprisereceiver/client.go @@ -187,6 +187,6 @@ func (c *splunkEntClient) isConfigured(v string) bool { // Check if the splunkEntClient contains a configured endpoint for the type of scraper // Returns true if an entry exists, false if not. 
func (c *splunkEntClient) isConfigured(v string) bool { - _, ok := (*c)[v] + _, ok := c.clients[v] return ok } From 2caf79175bea40dce86365b450aad75bc0ff7ac0 Mon Sep 17 00:00:00 2001 From: shalper2 Date: Wed, 14 Feb 2024 14:43:23 -0600 Subject: [PATCH 26/31] make generate --- receiver/splunkenterprisereceiver/config.go | 3 +-- receiver/splunkenterprisereceiver/config_test.go | 3 +-- receiver/splunkenterprisereceiver/factory.go | 3 +-- receiver/splunkenterprisereceiver/factory_test.go | 3 +-- .../internal/metadata/generated_config.go | 2 +- receiver/splunkenterprisereceiver/scraper.go | 3 +-- receiver/splunkenterprisereceiver/scraper_test.go | 7 +++---- 7 files changed, 9 insertions(+), 15 deletions(-) diff --git a/receiver/splunkenterprisereceiver/config.go b/receiver/splunkenterprisereceiver/config.go index a5ce13dd6b11f..90698eb4d38b0 100644 --- a/receiver/splunkenterprisereceiver/config.go +++ b/receiver/splunkenterprisereceiver/config.go @@ -8,11 +8,10 @@ import ( "net/url" "strings" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/receiver/scraperhelper" "go.uber.org/multierr" - - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" ) var ( diff --git a/receiver/splunkenterprisereceiver/config_test.go b/receiver/splunkenterprisereceiver/config_test.go index 1693c940d6eb3..6562627e43103 100644 --- a/receiver/splunkenterprisereceiver/config_test.go +++ b/receiver/splunkenterprisereceiver/config_test.go @@ -7,14 +7,13 @@ import ( "path/filepath" "testing" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configauth" "go.opentelemetry.io/collector/config/confighttp" 
"go.opentelemetry.io/collector/confmap/confmaptest" "go.uber.org/multierr" - - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" ) func TestLoadConfig(t *testing.T) { diff --git a/receiver/splunkenterprisereceiver/factory.go b/receiver/splunkenterprisereceiver/factory.go index d0b9343c63a14..37cada962c753 100644 --- a/receiver/splunkenterprisereceiver/factory.go +++ b/receiver/splunkenterprisereceiver/factory.go @@ -7,14 +7,13 @@ import ( "context" "time" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/config/configopaque" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/scraperhelper" - - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" ) const ( diff --git a/receiver/splunkenterprisereceiver/factory_test.go b/receiver/splunkenterprisereceiver/factory_test.go index be717b5c73f80..9c9fae445b1ab 100644 --- a/receiver/splunkenterprisereceiver/factory_test.go +++ b/receiver/splunkenterprisereceiver/factory_test.go @@ -8,14 +8,13 @@ import ( "testing" "time" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/config/configopaque" "go.opentelemetry.io/collector/consumer/consumertest" "go.opentelemetry.io/collector/receiver/receivertest" "go.opentelemetry.io/collector/receiver/scraperhelper" - - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" ) func TestFactoryCreate(t *testing.T) { diff --git 
a/receiver/splunkenterprisereceiver/internal/metadata/generated_config.go b/receiver/splunkenterprisereceiver/internal/metadata/generated_config.go index 7e9248ece59b9..5b80b4fb6dcc1 100644 --- a/receiver/splunkenterprisereceiver/internal/metadata/generated_config.go +++ b/receiver/splunkenterprisereceiver/internal/metadata/generated_config.go @@ -15,7 +15,7 @@ func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error { if parser == nil { return nil } - err := parser.Unmarshal(ms, confmap.WithErrorUnused()) + err := parser.Unmarshal(ms) if err != nil { return err } diff --git a/receiver/splunkenterprisereceiver/scraper.go b/receiver/splunkenterprisereceiver/scraper.go index e1f4f229de044..aac2f9797a071 100644 --- a/receiver/splunkenterprisereceiver/scraper.go +++ b/receiver/splunkenterprisereceiver/scraper.go @@ -15,13 +15,12 @@ import ( "sync" "time" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/scrapererror" - - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" ) var ( diff --git a/receiver/splunkenterprisereceiver/scraper_test.go b/receiver/splunkenterprisereceiver/scraper_test.go index c243ecd3ac29f..9c680cfb9fa39 100644 --- a/receiver/splunkenterprisereceiver/scraper_test.go +++ b/receiver/splunkenterprisereceiver/scraper_test.go @@ -11,6 +11,9 @@ import ( "testing" "time" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" "github.com/stretchr/testify/require" 
"go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenttest" @@ -19,10 +22,6 @@ import ( "go.opentelemetry.io/collector/extension/auth" "go.opentelemetry.io/collector/receiver/receivertest" "go.opentelemetry.io/collector/receiver/scraperhelper" - - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest" - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" ) // handler function for mock server From de6866b1a23954f0e270706848cd0fa3cbaa61ba Mon Sep 17 00:00:00 2001 From: shalper2 Date: Wed, 14 Feb 2024 15:17:47 -0600 Subject: [PATCH 27/31] linted --- receiver/splunkenterprisereceiver/client.go | 7 ------- receiver/splunkenterprisereceiver/config.go | 4 ++-- receiver/splunkenterprisereceiver/config_test.go | 3 ++- receiver/splunkenterprisereceiver/factory.go | 3 ++- receiver/splunkenterprisereceiver/factory_test.go | 3 ++- receiver/splunkenterprisereceiver/scraper.go | 3 ++- receiver/splunkenterprisereceiver/scraper_test.go | 7 ++++--- 7 files changed, 14 insertions(+), 16 deletions(-) diff --git a/receiver/splunkenterprisereceiver/client.go b/receiver/splunkenterprisereceiver/client.go index 056dd9cddf25d..6c0bb419f7663 100644 --- a/receiver/splunkenterprisereceiver/client.go +++ b/receiver/splunkenterprisereceiver/client.go @@ -183,10 +183,3 @@ func (c *splunkEntClient) isConfigured(v string) bool { _, ok := c.clients[v] return ok } - -// Check if the splunkEntClient contains a configured endpoint for the type of scraper -// Returns true if an entry exists, false if not. 
-func (c *splunkEntClient) isConfigured(v string) bool { - _, ok := c.clients[v] - return ok -} diff --git a/receiver/splunkenterprisereceiver/config.go b/receiver/splunkenterprisereceiver/config.go index 90698eb4d38b0..f9ac3488aa65a 100644 --- a/receiver/splunkenterprisereceiver/config.go +++ b/receiver/splunkenterprisereceiver/config.go @@ -8,14 +8,14 @@ import ( "net/url" "strings" - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/receiver/scraperhelper" "go.uber.org/multierr" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" ) var ( - errUnspecifiedEndpoint = errors.New("endpoint to an unspecified node type") errBadOrMissingEndpoint = errors.New("missing a valid endpoint") errBadScheme = errors.New("endpoint scheme must be either http or https") errMissingAuthExtension = errors.New("auth extension missing from config") diff --git a/receiver/splunkenterprisereceiver/config_test.go b/receiver/splunkenterprisereceiver/config_test.go index 6562627e43103..1693c940d6eb3 100644 --- a/receiver/splunkenterprisereceiver/config_test.go +++ b/receiver/splunkenterprisereceiver/config_test.go @@ -7,13 +7,14 @@ import ( "path/filepath" "testing" - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configauth" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/confmap/confmaptest" "go.uber.org/multierr" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" ) func TestLoadConfig(t *testing.T) { diff --git a/receiver/splunkenterprisereceiver/factory.go b/receiver/splunkenterprisereceiver/factory.go index 
37cada962c753..d0b9343c63a14 100644 --- a/receiver/splunkenterprisereceiver/factory.go +++ b/receiver/splunkenterprisereceiver/factory.go @@ -7,13 +7,14 @@ import ( "context" "time" - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/config/configopaque" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/scraperhelper" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" ) const ( diff --git a/receiver/splunkenterprisereceiver/factory_test.go b/receiver/splunkenterprisereceiver/factory_test.go index 9c9fae445b1ab..be717b5c73f80 100644 --- a/receiver/splunkenterprisereceiver/factory_test.go +++ b/receiver/splunkenterprisereceiver/factory_test.go @@ -8,13 +8,14 @@ import ( "testing" "time" - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/config/configopaque" "go.opentelemetry.io/collector/consumer/consumertest" "go.opentelemetry.io/collector/receiver/receivertest" "go.opentelemetry.io/collector/receiver/scraperhelper" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" ) func TestFactoryCreate(t *testing.T) { diff --git a/receiver/splunkenterprisereceiver/scraper.go b/receiver/splunkenterprisereceiver/scraper.go index aac2f9797a071..e1f4f229de044 100644 --- a/receiver/splunkenterprisereceiver/scraper.go +++ b/receiver/splunkenterprisereceiver/scraper.go @@ -15,12 +15,13 @@ import ( "sync" "time" - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" 
"go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/scrapererror" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" ) var ( diff --git a/receiver/splunkenterprisereceiver/scraper_test.go b/receiver/splunkenterprisereceiver/scraper_test.go index 9c680cfb9fa39..c243ecd3ac29f 100644 --- a/receiver/splunkenterprisereceiver/scraper_test.go +++ b/receiver/splunkenterprisereceiver/scraper_test.go @@ -11,9 +11,6 @@ import ( "testing" "time" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest" - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenttest" @@ -22,6 +19,10 @@ import ( "go.opentelemetry.io/collector/extension/auth" "go.opentelemetry.io/collector/receiver/receivertest" "go.opentelemetry.io/collector/receiver/scraperhelper" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" ) // handler function for mock server From 2207171634dc65bcaa12205270a793662d6c06aa Mon Sep 17 00:00:00 2001 From: shalper2 Date: Wed, 6 Mar 2024 08:08:18 -0600 Subject: [PATCH 28/31] make generate --- receiver/splunkenterprisereceiver/generated_component_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/receiver/splunkenterprisereceiver/generated_component_test.go b/receiver/splunkenterprisereceiver/generated_component_test.go index 
e69f7c4f39bf2..8c27f93f0a177 100644 --- a/receiver/splunkenterprisereceiver/generated_component_test.go +++ b/receiver/splunkenterprisereceiver/generated_component_test.go @@ -6,7 +6,6 @@ import ( "context" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenttest" From 7890413558095ff009b3db488883aef1439aa7cd Mon Sep 17 00:00:00 2001 From: shalper2 Date: Wed, 6 Mar 2024 08:17:31 -0600 Subject: [PATCH 29/31] fixed changes in tests --- receiver/splunkenterprisereceiver/client_test.go | 6 +++--- receiver/splunkenterprisereceiver/scraper_test.go | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/receiver/splunkenterprisereceiver/client_test.go b/receiver/splunkenterprisereceiver/client_test.go index 1f47faa28882f..8dd873da9f02c 100644 --- a/receiver/splunkenterprisereceiver/client_test.go +++ b/receiver/splunkenterprisereceiver/client_test.go @@ -47,7 +47,7 @@ func TestClientCreation(t *testing.T) { host := &mockHost{ extensions: map[component.ID]component.Component{ - component.NewID("basicauth/client"): auth.NewClient(), + component.MustNewIDWithName("basicauth", "client"): auth.NewClient(), }, } // create a client from an example config @@ -76,7 +76,7 @@ func TestClientCreateRequest(t *testing.T) { host := &mockHost{ extensions: map[component.ID]component.Component{ - component.NewID("basicauth/client"): auth.NewClient(), + component.MustNewIDWithName("basicauth", "client"): auth.NewClient(), }, } // create a client from an example config @@ -157,7 +157,7 @@ func TestAPIRequestCreate(t *testing.T) { host := &mockHost{ extensions: map[component.ID]component.Component{ - component.NewID("basicauth/client"): auth.NewClient(), + component.MustNewIDWithName("basicauth", "client"): auth.NewClient(), }, } // create a client from an example config diff --git a/receiver/splunkenterprisereceiver/scraper_test.go 
b/receiver/splunkenterprisereceiver/scraper_test.go index c243ecd3ac29f..f6e6c547d1a2a 100644 --- a/receiver/splunkenterprisereceiver/scraper_test.go +++ b/receiver/splunkenterprisereceiver/scraper_test.go @@ -105,7 +105,7 @@ func TestScraper(t *testing.T) { host := &mockHost{ extensions: map[component.ID]component.Component{ - component.NewID("basicauth/client"): auth.NewClient(), + component.MustNewIDWithName("basicauth", "client"): auth.NewClient(), }, } From b4c579ef3ae08d448fe72edf39c23ae942f1a8b1 Mon Sep 17 00:00:00 2001 From: shalper2 Date: Mon, 18 Mar 2024 14:24:15 -0500 Subject: [PATCH 30/31] channel buffer size reflects number of metrics --- receiver/splunkenterprisereceiver/scraper.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/receiver/splunkenterprisereceiver/scraper.go b/receiver/splunkenterprisereceiver/scraper.go index e1f4f229de044..5ef09e161d558 100644 --- a/receiver/splunkenterprisereceiver/scraper.go +++ b/receiver/splunkenterprisereceiver/scraper.go @@ -81,7 +81,6 @@ func errorListener(ctx context.Context, eQueue <-chan error, eOut chan<- *scrape // The big one: Describes how all scraping tasks should be performed. 
Part of the scraper interface func (s *splunkScraper) scrape(ctx context.Context) (pmetric.Metrics, error) { var wg sync.WaitGroup - errChan := make(chan error, 9) errOut := make(chan *scrapererror.ScrapeErrors) var errs *scrapererror.ScrapeErrors now := pcommon.NewTimestampFromTime(time.Now()) @@ -107,6 +106,7 @@ func (s *splunkScraper) scrape(ctx context.Context) (pmetric.Metrics, error) { s.scrapeSchedulerRunTimeByHost, s.scrapeIndexerAvgRate, } + errChan := make(chan error, len(metricScrapes)) go func() { errorListener(ctx, errChan, errOut) From 1f2211680e47a1f07323c844160fea91d17a71a4 Mon Sep 17 00:00:00 2001 From: shalper2 Date: Wed, 3 Apr 2024 11:04:19 -0500 Subject: [PATCH 31/31] simplified error loop --- receiver/splunkenterprisereceiver/scraper.go | 25 +++++--------------- 1 file changed, 6 insertions(+), 19 deletions(-) diff --git a/receiver/splunkenterprisereceiver/scraper.go b/receiver/splunkenterprisereceiver/scraper.go index 1a0631e5e207c..482c4a33fb619 100644 --- a/receiver/splunkenterprisereceiver/scraper.go +++ b/receiver/splunkenterprisereceiver/scraper.go @@ -56,26 +56,13 @@ func (s *splunkScraper) start(_ context.Context, h component.Host) (err error) { // listens to the error channel and combines errors sent from different metric scrape functions, // returning the combined error list should context timeout or a nil error value is sent in the // channel signifying the end of a scrape cycle -func errorListener(ctx context.Context, eQueue <-chan error, eOut chan<- *scrapererror.ScrapeErrors) { +func errorListener(eQueue <-chan error, eOut chan<- *scrapererror.ScrapeErrors) { errs := &scrapererror.ScrapeErrors{} - for { - select { - // context timeout - case <-ctx.Done(): - eOut <- errs - return - case err, ok := <-eQueue: - // shutdown - if err == nil || !ok { - eOut <- errs - return - // or add an error to errs - } else { - errs.Add(err) - } - } + for err := range eQueue { + errs.Add(err) } + eOut <- errs } // The big one: Describes how all 
scraping tasks should be performed. Part of the scraper interface @@ -109,7 +96,7 @@ func (s *splunkScraper) scrape(ctx context.Context) (pmetric.Metrics, error) { errChan := make(chan error, len(metricScrapes)) go func() { - errorListener(ctx, errChan, errOut) + errorListener(errChan, errOut) }() for _, fn := range metricScrapes { @@ -126,7 +113,7 @@ func (s *splunkScraper) scrape(ctx context.Context) (pmetric.Metrics, error) { } wg.Wait() - errChan <- nil + close(errChan) errs = <-errOut return s.mb.Emit(), errs.Combine() }