From 6e54c13943de1fa49e3503b140406094a5439ab6 Mon Sep 17 00:00:00 2001
From: Hugo Bollon
Date: Mon, 5 Jul 2021 16:08:50 +0200
Subject: [PATCH] docs: update readme with new aws configuration fields

---
 README.md       | 29 +++++++++++------
 state/aws.go    | 86 ++++++++++++++++++++++++++-----------------------
 state/gcp.go    | 61 +++++++++++++++++++++--------------
 state/gitlab.go | 24 +++++++++-----
 state/state.go  |  8 ++---
 state/tfe.go    | 49 +++++++++++++++++-----------
 6 files changed, 151 insertions(+), 106 deletions(-)

diff --git a/README.md b/README.md
index 1f3e979d..43b0a9b1 100644
--- a/README.md
+++ b/README.md
@@ -146,8 +146,8 @@ Data resiliency is not paramount though as this dataset can be rebuilt upon your
 
 Terraboard currently supports configuration in three different ways:
 
-1. Environment variables **(only usable for mono provider configuration)**
-2. CLI parameters **(only usable for mono provider configuration)**
+1. Environment variables **(only usable for single provider configuration)**
+2. CLI parameters **(only usable for single provider configuration)**
 3. Configuration file (YAML). A configuration file example can be found in the root directory of this repository and in the `test/` subdirectory.
 
 **Important: all flags/environment variables related to the providers settings aren't compatible with multi-provider configuration! Instead, you must use the YAML config file to be able to configure multiples buckets/providers.**
@@ -171,18 +171,18 @@ aws:
   - endpoint: http://minio:9000/
     region: eu-west-1
     s3:
-      bucket: test-bucket
-      force-path-style: true
-      file-extension:
-        - .tfstate
+      - bucket: test-bucket
+        force-path-style: true
+        file-extension:
+          - .tfstate
 
   - endpoint: http://minio:9000/
     region: eu-west-1
     s3:
-      bucket: test-bucket2
-      force-path-style: true
-      file-extension:
-        - .tfstate
+      - bucket: test-bucket2
+        force-path-style: true
+        file-extension:
+          - .tfstate
 ```
 
 In the case of AWS, don't forget to set the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables.
@@ -243,6 +243,15 @@ You can find a ready-to-use Docker example with two *MinIO* buckets in the `test/` subdirectory
 
 #### AWS (and S3 compatible providers) Options
 
+- `--aws-access-key` AWS account access key.
+  - Env: *AWS_ACCESS_KEY_ID*
+  - Yaml: *aws.access-key*
+- `--aws-secret-access-key` AWS secret account access key.
+  - Env: *AWS_SECRET_ACCESS_KEY*
+  - Yaml: *aws.secret-access-key*
+- `--aws-session-token` AWS session token.
+  - Env: *AWS_SESSION_TOKEN*
+  - Yaml: *aws.session-token*
 - `--dynamodb-table` AWS DynamoDB table for locks.
   - Env: *AWS_DYNAMODB_TABLE*
   - Yaml: *aws.dynamodb-table*
diff --git a/state/aws.go b/state/aws.go
index 5a9e8108..991c71df 100644
--- a/state/aws.go
+++ b/state/aws.go
@@ -32,49 +32,55 @@ type AWS struct {
 }
 
 // NewAWS creates an AWS object
-func NewAWS(c *config.Config) []*AWS {
+func NewAWS(aws config.AWSConfig, bucket config.S3BucketConfig, noLocks, noVersioning bool) *AWS {
+    if bucket.Bucket != "" {
+        sess := session.Must(session.NewSession())
+        awsConfig := aws_sdk.NewConfig()
+        var creds *credentials.Credentials
+        if len(aws.APPRoleArn) > 0 {
+            log.Debugf("Using %s role", aws.APPRoleArn)
+            creds = stscreds.NewCredentials(sess, aws.APPRoleArn, func(p *stscreds.AssumeRoleProvider) {
+                if aws.ExternalID != "" {
+                    p.ExternalID = aws_sdk.String(aws.ExternalID)
+                }
+            })
+        } else {
+            if aws.AccessKey == "" || aws.SecretAccessKey == "" {
+                log.Fatal("Missing AccessKey or SecretAccessKey for AWS provider. Please check your configuration and retry")
+            }
+            creds = credentials.NewStaticCredentials(aws.AccessKey, aws.SecretAccessKey, aws.SessionToken)
+        }
+        awsConfig.WithCredentials(creds)
+
+        if e := aws.Endpoint; e != "" {
+            awsConfig.WithEndpoint(e)
+        }
+        if e := aws.Region; e != "" {
+            awsConfig.WithRegion(e)
+        }
+        awsConfig.S3ForcePathStyle = &bucket.ForcePathStyle
+
+        return &AWS{
+            svc:           s3.New(sess, awsConfig),
+            bucket:        bucket.Bucket,
+            keyPrefix:     bucket.KeyPrefix,
+            fileExtension: bucket.FileExtension,
+            dynamoSvc:     dynamodb.New(sess, awsConfig),
+            dynamoTable:   aws.DynamoDBTable,
+            noLocks:       noLocks,
+            noVersioning:  noVersioning,
+        }
+    }
+
+    return nil
+}
+
+// NewAWSCollection instantiates all AWS objects configured by the user and returns them as a slice
+func NewAWSCollection(c *config.Config) []*AWS {
     var awsInstances []*AWS
     for _, aws := range c.AWS {
         for _, bucket := range aws.S3 {
-            if bucket.Bucket != "" {
-                sess := session.Must(session.NewSession())
-                awsConfig := aws_sdk.NewConfig()
-                var creds *credentials.Credentials
-                if len(aws.APPRoleArn) > 0 {
-                    log.Debugf("Using %s role", aws.APPRoleArn)
-                    creds = stscreds.NewCredentials(sess, aws.APPRoleArn, func(p *stscreds.AssumeRoleProvider) {
-                        if aws.ExternalID != "" {
-                            p.ExternalID = aws_sdk.String(aws.ExternalID)
-                        }
-                    })
-                } else {
-                    if aws.AccessKey == "" || aws.SecretAccessKey == "" {
-                        log.Fatal("Missing AccessKey or SecretAccessKey for AWS provider. Please check your configuration and retry")
-                    }
-                    creds = credentials.NewStaticCredentials(aws.AccessKey, aws.SecretAccessKey, aws.SessionToken)
-                }
-                awsConfig.WithCredentials(creds)
-
-                if e := aws.Endpoint; e != "" {
-                    awsConfig.WithEndpoint(e)
-                }
-                if e := aws.Region; e != "" {
-                    awsConfig.WithRegion(e)
-                }
-                awsConfig.S3ForcePathStyle = &bucket.ForcePathStyle
-
-                instance := &AWS{
-                    svc:           s3.New(sess, awsConfig),
-                    bucket:        bucket.Bucket,
-                    keyPrefix:     bucket.KeyPrefix,
-                    fileExtension: bucket.FileExtension,
-                    dynamoSvc:     dynamodb.New(sess, awsConfig),
-                    dynamoTable:   aws.DynamoDBTable,
-                    noLocks:       c.Provider.NoLocks,
-                    noVersioning:  c.Provider.NoVersioning,
-                }
-                awsInstances = append(awsInstances, instance)
-            }
+            awsInstances = append(awsInstances, NewAWS(aws, bucket, c.Provider.NoLocks, c.Provider.NoVersioning))
         }
     }
diff --git a/state/gcp.go b/state/gcp.go
index 13e8cf7f..29f038d4 100644
--- a/state/gcp.go
+++ b/state/gcp.go
@@ -24,39 +24,50 @@ type GCP struct {
 }
 
 // NewGCP creates an GCP object
-func NewGCP(c *config.Config) ([]*GCP, error) {
+func NewGCP(gcp config.GCPConfig) (*GCP, error) {
     ctx := context.Background()
     var client *storage.Client
-    var gcpInstances []*GCP
+    var gcpInstance *GCP
     var err error
-    for _, gcp := range c.GCP {
-        if gcp.GCSBuckets != nil {
-            if gcp.GCPSAKey != "" {
-                log.WithFields(log.Fields{
-                    "path": gcp.GCPSAKey,
-                }).Info("Authenticating using service account key")
-                opt := option.WithCredentialsFile(gcp.GCPSAKey)
-                client, err = storage.NewClient(ctx, opt) // Use service account key
-            } else {
-                client, err = storage.NewClient(ctx) // Use base credentials
-            }
+    if gcp.GCSBuckets != nil {
+        if gcp.GCPSAKey != "" {
+            log.WithFields(log.Fields{
+                "path": gcp.GCPSAKey,
+            }).Info("Authenticating using service account key")
+            opt := option.WithCredentialsFile(gcp.GCPSAKey)
+            client, err = storage.NewClient(ctx, opt) // Use service account key
+        } else {
+            client, err = storage.NewClient(ctx) // Use base credentials
+        }
 
-            if err != nil {
-                log.Fatalf("Failed to create client: %v", err)
-                return nil, err
-            }
+        if err != nil {
+            log.Fatalf("Failed to create client: %v", err)
+            return nil, err
+        }
 
-            instance := &GCP{
-                svc:     client,
-                buckets: gcp.GCSBuckets,
-            }
-            gcpInstances = append(gcpInstances, instance)
+        gcpInstance = &GCP{
+            svc:     client,
+            buckets: gcp.GCSBuckets,
+        }
 
-            log.WithFields(log.Fields{
-                "buckets": gcp.GCSBuckets,
-            }).Info("Client successfully created")
+        log.WithFields(log.Fields{
+            "buckets": gcp.GCSBuckets,
+        }).Info("Client successfully created")
+    }
+
+    return gcpInstance, nil
+}
+
+// NewGCPCollection instantiates all GCP objects configured by the user and returns them as a slice
+func NewGCPCollection(c *config.Config) ([]*GCP, error) {
+    var gcpInstances []*GCP
+    for _, gcp := range c.GCP {
+        gcpInstance, err := NewGCP(gcp)
+        if err != nil {
+            return nil, err
         }
+        gcpInstances = append(gcpInstances, gcpInstance)
     }
     return gcpInstances, nil
diff --git a/state/gitlab.go b/state/gitlab.go
index 6f48cf2e..9c2186d7 100644
--- a/state/gitlab.go
+++ b/state/gitlab.go
@@ -18,16 +18,24 @@ type Gitlab struct {
 }
 
 // NewGitlab creates a new Gitlab object
-func NewGitlab(c *config.Config) []*Gitlab {
-    var gitlabInstances []*Gitlab
-    for _, gl := range c.Gitlab {
-        if gl.Token != "" {
-            instance := &Gitlab{
-                Client: gitlab.NewClient(gl.Address, gl.Token),
-            }
-            gitlabInstances = append(gitlabInstances, instance)
+func NewGitlab(gl config.GitlabConfig) *Gitlab {
+    var instance *Gitlab
+    if gl.Token != "" {
+        instance = &Gitlab{
+            Client: gitlab.NewClient(gl.Address, gl.Token),
         }
     }
+
+    return instance
+}
+
+// NewGitlabCollection instantiates all Gitlab objects configured by the user and returns them as a slice
+func NewGitlabCollection(c *config.Config) []*Gitlab {
+    var gitlabInstances []*Gitlab
+    for _, gitlab := range c.Gitlab {
+        gitlabInstances = append(gitlabInstances, NewGitlab(gitlab))
+    }
+
     return gitlabInstances
 }
diff --git a/state/state.go b/state/state.go
index 7407abfa..8fe99da2 100644
--- a/state/state.go
+++ b/state/state.go
@@ -43,7 +43,7 @@ type Provider interface {
 func Configure(c *config.Config) ([]Provider, error) {
     var providers []Provider
     if len(c.TFE) > 0 {
-        objs, err := NewTFE(c)
+        objs, err := NewTFECollection(c)
         if err != nil {
             return []Provider{}, err
         }
@@ -56,7 +56,7 @@ func Configure(c *config.Config) ([]Provider, error) {
     }
 
     if len(c.GCP) > 0 {
-        objs, err := NewGCP(c)
+        objs, err := NewGCPCollection(c)
         if err != nil {
             return []Provider{}, err
         }
@@ -69,7 +69,7 @@ func Configure(c *config.Config) ([]Provider, error) {
     }
 
     if len(c.Gitlab) > 0 {
-        objs := NewGitlab(c)
+        objs := NewGitlabCollection(c)
         if len(objs) > 0 {
             log.Info("Using Gitab as state/locks provider")
             for _, glObj := range objs {
@@ -79,7 +79,7 @@ func Configure(c *config.Config) ([]Provider, error) {
     }
 
     if len(c.AWS) > 0 {
-        objs := NewAWS(c)
+        objs := NewAWSCollection(c)
         if len(objs) > 0 {
             log.Info("Using AWS (S3+DynamoDB) as state/locks provider")
             for _, awsObj := range objs {
diff --git a/state/tfe.go b/state/tfe.go
index ed227ce1..8ca07982 100644
--- a/state/tfe.go
+++ b/state/tfe.go
@@ -19,28 +19,39 @@ type TFE struct {
 }
 
 // NewTFE creates a new TFE object
-func NewTFE(c *config.Config) ([]*TFE, error) {
-    var tfeInstances []*TFE
-    for _, tfeObj := range c.TFE {
-        if tfeObj.Token != "" {
-            config := &tfe.Config{
-                Address: tfeObj.Address,
-                Token:   tfeObj.Token,
-            }
+func NewTFE(tfeObj config.TFEConfig) (*TFE, error) {
+    var tfeInstance *TFE
+    if tfeObj.Token != "" {
+        config := &tfe.Config{
+            Address: tfeObj.Address,
+            Token:   tfeObj.Token,
+        }
 
-            client, err := tfe.NewClient(config)
-            if err != nil {
-                return nil, err
-            }
+        client, err := tfe.NewClient(config)
+        if err != nil {
+            return nil, err
+        }
 
-            ctx := context.Background()
-            instance := &TFE{
-                Client: client,
-                org:    tfeObj.Organization,
-                ctx:    &ctx,
-            }
-            tfeInstances = append(tfeInstances, instance)
+        ctx := context.Background()
+        tfeInstance = &TFE{
+            Client: client,
+            org:    tfeObj.Organization,
+            ctx:    &ctx,
+        }
+    }
+
+    return tfeInstance, nil
+}
+
+// NewTFECollection instantiates all TFE objects configured by the user and returns them as a slice
+func NewTFECollection(c *config.Config) ([]*TFE, error) {
+    var tfeInstances []*TFE
+    for _, tfe := range c.TFE {
+        tfeInstance, err := NewTFE(tfe)
+        if err != nil {
+            return nil, err
         }
+        tfeInstances = append(tfeInstances, tfeInstance)
     }
     return tfeInstances, nil
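
For readers who want to see how the refactored constructors fit together end to end, below is a minimal wiring sketch. It is not part of the patch: the module path `github.com/camptocamp/terraboard` and the exact shape of `config.Config` are assumed from the identifiers visible in this diff, and the credentials, endpoint and bucket names are placeholders mirroring the two-bucket MinIO example in the README hunk above.

```go
package main

import (
	"log"

	"github.com/camptocamp/terraboard/config"
	"github.com/camptocamp/terraboard/state"
)

func main() {
	// Two S3-compatible (MinIO) buckets on one AWS provider entry, matching
	// the README example above. Credentials and endpoints are placeholders;
	// in practice they come from the YAML file, the CLI flags or the AWS_*
	// environment variables documented in this patch.
	cfg := &config.Config{
		AWS: []config.AWSConfig{
			{
				AccessKey:       "minio-access-key",
				SecretAccessKey: "minio-secret-key",
				Endpoint:        "http://minio:9000/",
				Region:          "eu-west-1",
				S3: []config.S3BucketConfig{
					{Bucket: "test-bucket", ForcePathStyle: true, FileExtension: []string{".tfstate"}},
					{Bucket: "test-bucket2", ForcePathStyle: true, FileExtension: []string{".tfstate"}},
				},
			},
		},
	}

	// Configure fans out to NewAWSCollection / NewGCPCollection /
	// NewGitlabCollection / NewTFECollection depending on which sections of
	// the configuration are populated, and returns one state.Provider per
	// configured backend (two here, one per bucket).
	providers, err := state.Configure(cfg)
	if err != nil {
		log.Fatalf("failed to configure state providers: %v", err)
	}
	log.Printf("configured %d state provider(s)", len(providers))
}
```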
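One behavioural note on the split, since it is easy to miss in the diff: the new `New*Collection` helpers append whatever the per-provider constructor returns, and `NewAWS`, `NewGCP`, `NewGitlab` and `NewTFE` now return a nil instance when the bucket name, bucket list or token is empty. The previous loops only appended fully constructed instances, so callers iterating the returned slices (such as `Configure` in `state/state.go`) may need to guard against nil entries.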