Skip to content

Commit

Permalink
Rename Metric to Decider. (#3603)
Browse files Browse the repository at this point in the history
* Rename Metric to Decider.

* Rename MetricOption to DeciderOption.

* Remove stutter in Deciders API.
  • Loading branch information
markusthoemmes authored and josephburnett committed Apr 2, 2019
1 parent c0e78fa commit ca4fffb
Show file tree
Hide file tree
Showing 13 changed files with 216 additions and 216 deletions.
28 changes: 14 additions & 14 deletions cmd/autoscaler/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -177,35 +177,35 @@ func scalerConfig(logger *zap.SugaredLogger) *autoscaler.DynamicConfig {
return dynConfig
}

func uniScalerFactoryFunc(endpointsInformer corev1informers.EndpointsInformer) func(metric *autoscaler.Metric, dynamicConfig *autoscaler.DynamicConfig) (autoscaler.UniScaler, error) {
return func(metric *autoscaler.Metric, dynamicConfig *autoscaler.DynamicConfig) (autoscaler.UniScaler, error) {
func uniScalerFactoryFunc(endpointsInformer corev1informers.EndpointsInformer) func(decider *autoscaler.Decider, dynamicConfig *autoscaler.DynamicConfig) (autoscaler.UniScaler, error) {
return func(decider *autoscaler.Decider, dynamicConfig *autoscaler.DynamicConfig) (autoscaler.UniScaler, error) {
// Create a stats reporter which tags statistics by PA namespace, configuration name, and PA name.
reporter, err := autoscaler.NewStatsReporter(metric.Namespace,
labelValueOrEmpty(metric, serving.ServiceLabelKey), labelValueOrEmpty(metric, serving.ConfigurationLabelKey), metric.Name)
reporter, err := autoscaler.NewStatsReporter(decider.Namespace,
labelValueOrEmpty(decider, serving.ServiceLabelKey), labelValueOrEmpty(decider, serving.ConfigurationLabelKey), decider.Name)
if err != nil {
return nil, err
}

revName := metric.Labels[serving.RevisionLabelKey]
revName := decider.Labels[serving.RevisionLabelKey]
if revName == "" {
return nil, fmt.Errorf("no Revision label found in Metric: %v", metric)
return nil, fmt.Errorf("no Revision label found in Decider: %v", decider)
}

return autoscaler.New(dynamicConfig, metric.Namespace,
return autoscaler.New(dynamicConfig, decider.Namespace,
reconciler.GetServingK8SServiceNameForObj(revName), endpointsInformer,
metric.Spec.TargetConcurrency, reporter)
decider.Spec.TargetConcurrency, reporter)
}
}

func statsScraperFactoryFunc(endpointsLister corev1listers.EndpointsLister) func(metric *autoscaler.Metric) (autoscaler.StatsScraper, error) {
return func(metric *autoscaler.Metric) (autoscaler.StatsScraper, error) {
return autoscaler.NewServiceScraper(metric, endpointsLister)
func statsScraperFactoryFunc(endpointsLister corev1listers.EndpointsLister) func(decider *autoscaler.Decider) (autoscaler.StatsScraper, error) {
return func(decider *autoscaler.Decider) (autoscaler.StatsScraper, error) {
return autoscaler.NewServiceScraper(decider, endpointsLister)
}
}

func labelValueOrEmpty(metric *autoscaler.Metric, labelKey string) string {
if metric.Labels != nil {
if value, ok := metric.Labels[labelKey]; ok {
func labelValueOrEmpty(decider *autoscaler.Decider, labelKey string) string {
if decider.Labels != nil {
if value, ok := decider.Labels[labelKey]; ok {
return value
}
}
Expand Down
8 changes: 4 additions & 4 deletions cmd/autoscaler/main_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ const (
)

func TestLabelValueOrEmpty(t *testing.T) {
metric := &autoscaler.Metric{}
metric := &autoscaler.Decider{}
metric.Labels = make(map[string]string)
metric.Labels["test1"] = "test1val"
metric.Labels["test2"] = ""
Expand Down Expand Up @@ -65,7 +65,7 @@ func TestLabelValueOrEmpty(t *testing.T) {

func TestUniScalerFactoryFunc(t *testing.T) {
uniScalerFactory := getTestUniScalerFactory()
metric := &autoscaler.Metric{
metric := &autoscaler.Decider{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: testRevision,
Expand All @@ -81,7 +81,7 @@ func TestUniScalerFactoryFunc(t *testing.T) {

func TestUniScalerFactoryFunc_FailWhenRevisionLabelMissing(t *testing.T) {
uniScalerFactory := getTestUniScalerFactory()
metric := &autoscaler.Metric{
metric := &autoscaler.Decider{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: testRevision,
Expand All @@ -94,7 +94,7 @@ func TestUniScalerFactoryFunc_FailWhenRevisionLabelMissing(t *testing.T) {
}
}

func getTestUniScalerFactory() func(metric *autoscaler.Metric, dynamicConfig *autoscaler.DynamicConfig) (autoscaler.UniScaler, error) {
func getTestUniScalerFactory() func(decider *autoscaler.Decider, dynamicConfig *autoscaler.DynamicConfig) (autoscaler.UniScaler, error) {
kubeClient := fakeK8s.NewSimpleClientset()
kubeInformer := kubeinformers.NewSharedInformerFactory(kubeClient, 0)
return uniScalerFactoryFunc(kubeInformer.Core().V1().Endpoints())
Expand Down
4 changes: 2 additions & 2 deletions pkg/autoscaler/autoscaler.go
Original file line number Diff line number Diff line change
Expand Up @@ -135,8 +135,8 @@ func New(
}, nil
}

// Update reconfigures the UniScaler according to the MetricSpec.
func (a *Autoscaler) Update(spec MetricSpec) error {
// Update reconfigures the UniScaler according to the DeciderSpec.
func (a *Autoscaler) Update(spec DeciderSpec) error {
a.targetMutex.Lock()
defer a.targetMutex.Unlock()
a.target = spec.TargetConcurrency
Expand Down
2 changes: 1 addition & 1 deletion pkg/autoscaler/autoscaler_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -458,7 +458,7 @@ func TestAutoscaler_UpdateTarget(t *testing.T) {
podCount: 10,
})
a.expectScale(t, now, 10, true)
a.Update(MetricSpec{
a.Update(DeciderSpec{
TargetConcurrency: 1.0,
})
a.expectScale(t, now, 100, true)
Expand Down
90 changes: 45 additions & 45 deletions pkg/autoscaler/multiscaler.go
Original file line number Diff line number Diff line change
Expand Up @@ -42,26 +42,26 @@ const (
scrapeTickInterval = time.Second / 3
)

// Metric is a resource which observes the request load of a Revision and
// Decider is a resource which observes the request load of a Revision and
// recommends a number of replicas to run.
// +k8s:deepcopy-gen=true
type Metric struct {
type Decider struct {
metav1.ObjectMeta
Spec MetricSpec
Status MetricStatus
Spec DeciderSpec
Status DeciderStatus
}

// MetricSpec is the parameters in which the Revision should scaled.
type MetricSpec struct {
// DeciderSpec is the parameters according to which the Revision should be scaled.
type DeciderSpec struct {
TargetConcurrency float64
}

// MetricStatus is the current scale recommendation.
type MetricStatus struct {
// DeciderStatus is the current scale recommendation.
type DeciderStatus struct {
DesiredScale int32
}

// UniScaler records statistics for a particular Metric and proposes the scale for the Metric's target based on those statistics.
// UniScaler records statistics for a particular Decider and proposes the scale for the Decider's target based on those statistics.
type UniScaler interface {
// Record records the given statistics.
Record(context.Context, Stat)
Expand All @@ -70,15 +70,15 @@ type UniScaler interface {
// The returned boolean is true if and only if a proposal was returned.
Scale(context.Context, time.Time) (int32, bool)

// Update reconfigures the UniScaler according to the MetricSpec.
Update(MetricSpec) error
// Update reconfigures the UniScaler according to the DeciderSpec.
Update(DeciderSpec) error
}

// UniScalerFactory creates a UniScaler for a given PA using the given dynamic configuration.
type UniScalerFactory func(*Metric, *DynamicConfig) (UniScaler, error)
type UniScalerFactory func(*Decider, *DynamicConfig) (UniScaler, error)

// StatsScraperFactory creates a StatsScraper for a given PA using the given dynamic configuration.
type StatsScraperFactory func(*Metric) (StatsScraper, error)
type StatsScraperFactory func(*Decider) (StatsScraper, error)

// scalerRunner wraps a UniScaler and a channel for implementing shutdown behavior.
type scalerRunner struct {
Expand All @@ -87,21 +87,21 @@ type scalerRunner struct {
pokeCh chan struct{}

// mux guards access to metric
mux sync.RWMutex
metric Metric
mux sync.RWMutex
decider Decider
}

func (sr *scalerRunner) getLatestScale() int32 {
sr.mux.RLock()
defer sr.mux.RUnlock()
return sr.metric.Status.DesiredScale
return sr.decider.Status.DesiredScale
}

func (sr *scalerRunner) updateLatestScale(new int32) bool {
sr.mux.Lock()
defer sr.mux.Unlock()
if sr.metric.Status.DesiredScale != new {
sr.metric.Status.DesiredScale = new
if sr.decider.Status.DesiredScale != new {
sr.decider.Status.DesiredScale = new
return true
}
return false
Expand Down Expand Up @@ -151,23 +151,23 @@ func NewMultiScaler(
}
}

// Get return the current Metric.
func (m *MultiScaler) Get(ctx context.Context, namespace, name string) (*Metric, error) {
// Get returns the current Decider.
func (m *MultiScaler) Get(ctx context.Context, namespace, name string) (*Decider, error) {
key := NewMetricKey(namespace, name)
m.scalersMutex.RLock()
defer m.scalersMutex.RUnlock()
scaler, exists := m.scalers[key]
if !exists {
// This GroupResource is a lie, but unfortunately this interface requires one.
return nil, errors.NewNotFound(kpa.Resource("Metrics"), key)
return nil, errors.NewNotFound(kpa.Resource("Deciders"), key)
}
scaler.mux.RLock()
defer scaler.mux.RUnlock()
return (&scaler.metric).DeepCopy(), nil
return (&scaler.decider).DeepCopy(), nil
}

// Create instantiates the desired Metric.
func (m *MultiScaler) Create(ctx context.Context, metric *Metric) (*Metric, error) {
// Create instantiates the desired Decider.
func (m *MultiScaler) Create(ctx context.Context, metric *Decider) (*Decider, error) {
m.scalersMutex.Lock()
defer m.scalersMutex.Unlock()
key := NewMetricKey(metric.Namespace, metric.Name)
Expand All @@ -182,26 +182,26 @@ func (m *MultiScaler) Create(ctx context.Context, metric *Metric) (*Metric, erro
}
scaler.mux.RLock()
defer scaler.mux.RUnlock()
return (&scaler.metric).DeepCopy(), nil
return (&scaler.decider).DeepCopy(), nil
}

// Update applied the desired MetricSpec to a currently running Metric.
func (m *MultiScaler) Update(ctx context.Context, metric *Metric) (*Metric, error) {
key := NewMetricKey(metric.Namespace, metric.Name)
// Update applies the desired DeciderSpec to a currently running Decider.
func (m *MultiScaler) Update(ctx context.Context, decider *Decider) (*Decider, error) {
key := NewMetricKey(decider.Namespace, decider.Name)
m.scalersMutex.Lock()
defer m.scalersMutex.Unlock()
if scaler, exists := m.scalers[key]; exists {
scaler.mux.Lock()
defer scaler.mux.Unlock()
scaler.metric = *metric
scaler.scaler.Update(metric.Spec)
return metric, nil
scaler.decider = *decider
scaler.scaler.Update(decider.Spec)
return decider, nil
}
// This GroupResource is a lie, but unfortunately this interface requires one.
return nil, errors.NewNotFound(kpa.Resource("Metrics"), key)
return nil, errors.NewNotFound(kpa.Resource("Deciders"), key)
}

// Delete stops and removes a Metric.
// Delete stops and removes a Decider.
func (m *MultiScaler) Delete(ctx context.Context, namespace, name string) error {
key := NewMetricKey(namespace, name)
m.scalersMutex.Lock()
Expand All @@ -213,7 +213,7 @@ func (m *MultiScaler) Delete(ctx context.Context, namespace, name string) error
return nil
}

// Watch registers a singleton function to call when MetricStatus is updated.
// Watch registers a singleton function to call when DeciderStatus is updated.
func (m *MultiScaler) Watch(fn func(string)) {
m.watcherMutex.Lock()
defer m.watcherMutex.Unlock()
Expand Down Expand Up @@ -247,21 +247,21 @@ func (m *MultiScaler) setScale(metricKey string, scale int32) bool {
return true
}

func (m *MultiScaler) createScaler(ctx context.Context, metric *Metric) (*scalerRunner, error) {
func (m *MultiScaler) createScaler(ctx context.Context, decider *Decider) (*scalerRunner, error) {

scaler, err := m.uniScalerFactory(metric, m.dynConfig)
scaler, err := m.uniScalerFactory(decider, m.dynConfig)
if err != nil {
return nil, err
}

stopCh := make(chan struct{})
runner := &scalerRunner{
scaler: scaler,
stopCh: stopCh,
metric: *metric,
pokeCh: make(chan struct{}),
scaler: scaler,
stopCh: stopCh,
decider: *decider,
pokeCh: make(chan struct{}),
}
runner.metric.Status.DesiredScale = -1
runner.decider.Status.DesiredScale = -1

ticker := time.NewTicker(m.dynConfig.Current().TickInterval)

Expand All @@ -284,9 +284,9 @@ func (m *MultiScaler) createScaler(ctx context.Context, metric *Metric) (*scaler
}
}()

scraper, err := m.statsScraperFactory(metric)
scraper, err := m.statsScraperFactory(decider)
if err != nil {
return nil, fmt.Errorf("failed to create a stats scraper for metric %q: %v", metric.Name, err)
return nil, fmt.Errorf("failed to create a stats scraper for metric %q: %v", decider.Name, err)
}
scraperTicker := time.NewTicker(scrapeTickInterval)
go func() {
Expand All @@ -310,7 +310,7 @@ func (m *MultiScaler) createScaler(ctx context.Context, metric *Metric) (*scaler
}
}()

metricKey := NewMetricKey(metric.Namespace, metric.Name)
metricKey := NewMetricKey(decider.Namespace, decider.Name)
go func() {
for {
select {
Expand Down Expand Up @@ -350,7 +350,7 @@ func (m *MultiScaler) tickScaler(ctx context.Context, scaler UniScaler, scaleCha
}
}

// RecordStat records some statistics for the given Metric.
// RecordStat records some statistics for the given Decider.
func (m *MultiScaler) RecordStat(key string, stat Stat) {
m.scalersMutex.RLock()
defer m.scalersMutex.RUnlock()
Expand Down

0 comments on commit ca4fffb

Please sign in to comment.