diff --git a/Dockerfile b/Dockerfile index 9bd4851c..b57bcb5a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.15 as builder +FROM golang:1.17 as builder WORKDIR /opt/build COPY . . RUN make build diff --git a/Dockerfile.dev b/Dockerfile.dev index 929b3a03..9c6715f4 100644 --- a/Dockerfile.dev +++ b/Dockerfile.dev @@ -1,4 +1,4 @@ -FROM golang:1.15 +FROM golang:1.17 WORKDIR /opt/build COPY . . RUN make install diff --git a/go.mod b/go.mod index 75a87de7..164529cc 100644 --- a/go.mod +++ b/go.mod @@ -1,60 +1,113 @@ module github.com/camptocamp/terraboard -go 1.15 +go 1.17 require ( cloud.google.com/go/storage v1.12.0 github.com/DATA-DOG/go-sqlmock v1.5.0 github.com/agext/levenshtein v1.2.3 + github.com/apparentlymart/go-cidr v1.1.0 github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 github.com/apparentlymart/go-versions v1.0.1 github.com/aws/aws-sdk-go v1.37.2 + github.com/bmatcuk/doublestar v1.1.5 + github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f github.com/davecgh/go-spew v1.1.1 - github.com/go-openapi/jsonreference v0.19.6 // indirect - github.com/go-openapi/swag v0.19.15 // indirect github.com/go-test/deep v1.0.3 github.com/google/go-cmp v0.5.6 + github.com/google/uuid v1.2.0 github.com/gorilla/mux v1.8.0 github.com/gusaul/go-dynamock v0.0.0-20210107061312-3e989056e1e6 github.com/hashicorp/errwrap v1.1.0 github.com/hashicorp/go-cleanhttp v0.5.1 + github.com/hashicorp/go-getter v1.5.2 github.com/hashicorp/go-hclog v0.15.0 github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/go-retryablehttp v0.6.8 - github.com/hashicorp/go-slug v0.6.0 // indirect github.com/hashicorp/go-tfe v0.15.0 github.com/hashicorp/go-uuid v1.0.1 github.com/hashicorp/go-version v1.2.1 - github.com/hashicorp/hcl/v2 v2.10.0 + github.com/hashicorp/hcl/v2 v2.12.0 github.com/hashicorp/terraform v1.0.2 github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 - github.com/jackc/pgproto3/v2 v2.0.7 // indirect 
github.com/jessevdk/go-flags v1.5.0 - github.com/lib/pq v1.9.0 // indirect github.com/machinebox/graphql v0.2.2 - github.com/mailru/easyjson v0.7.7 // indirect - github.com/matryer/is v1.4.0 // indirect github.com/mitchellh/copystructure v1.2.0 - github.com/mitchellh/go-wordwrap v1.0.1 // indirect - github.com/mitchellh/panicwrap v1.0.0 + github.com/mitchellh/go-homedir v1.1.0 github.com/pmezard/go-difflib v1.0.0 github.com/sirupsen/logrus v1.7.0 github.com/spf13/afero v1.2.2 github.com/stretchr/testify v1.7.0 - github.com/swaggo/files v0.0.0-20210815190702-a29dd2bc99b2 // indirect github.com/swaggo/http-swagger v1.1.1 github.com/swaggo/swag v1.7.3 github.com/zclconf/go-cty v1.9.0 + github.com/zclconf/go-cty-yaml v1.0.2 golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a golang.org/x/mod v0.4.2 golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b - golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef // indirect - golang.org/x/text v0.3.7 // indirect - golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 // indirect - golang.org/x/tools v0.1.7 // indirect + golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef + golang.org/x/text v0.3.7 google.golang.org/api v0.44.0-impersonate-preview gopkg.in/yaml.v2 v2.4.0 gorm.io/datatypes v1.0.1 gorm.io/driver/postgres v1.1.0 gorm.io/gorm v1.21.10 ) + +require ( + cloud.google.com/go v0.79.0 // indirect + github.com/KyleBanks/depth v1.2.1 // indirect + github.com/PuerkitoBio/purell v1.1.1 // indirect + github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect + github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect + github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect + github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect + github.com/fatih/color v1.7.0 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.19.6 // indirect + github.com/go-openapi/spec v0.20.3 // indirect + github.com/go-openapi/swag 
v0.19.15 // indirect + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect + github.com/golang/protobuf v1.4.3 // indirect + github.com/google/go-querystring v1.0.0 // indirect + github.com/googleapis/gax-go/v2 v2.0.5 // indirect + github.com/hashicorp/go-safetemp v1.0.0 // indirect + github.com/hashicorp/go-slug v0.6.0 // indirect + github.com/hashicorp/jsonapi v0.0.0-20210518035559-1e50d74c8db3 // indirect + github.com/jackc/chunkreader/v2 v2.0.1 // indirect + github.com/jackc/pgconn v1.8.1 // indirect + github.com/jackc/pgio v1.0.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgproto3/v2 v2.0.7 // indirect + github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect + github.com/jackc/pgtype v1.7.0 // indirect + github.com/jackc/pgx/v4 v4.11.0 // indirect + github.com/jinzhu/inflection v1.0.0 // indirect + github.com/jinzhu/now v1.1.2 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/jstemmer/go-junit-report v0.9.1 // indirect + github.com/klauspost/compress v1.11.2 // indirect + github.com/lib/pq v1.9.0 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/matryer/is v1.4.0 // indirect + github.com/mattn/go-colorable v0.1.6 // indirect + github.com/mattn/go-isatty v0.0.12 // indirect + github.com/mitchellh/go-testing-interface v1.0.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/swaggo/files v0.0.0-20210815190702-a29dd2bc99b2 // indirect + github.com/ulikunitz/xz v0.5.8 // indirect + go.opencensus.io v0.23.0 // indirect + golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 // indirect + golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84 // indirect + golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 // indirect + golang.org/x/tools v0.1.7 // indirect + 
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6 // indirect + google.golang.org/grpc v1.36.0 // indirect + google.golang.org/protobuf v1.25.0 // indirect + gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 // indirect +) diff --git a/go.sum b/go.sum index 7b3baa75..5f3280b4 100644 --- a/go.sum +++ b/go.sum @@ -106,6 +106,7 @@ github.com/antchfx/xpath v0.0.0-20190129040759-c8489ed3251e/go.mod h1:Yee4kTMuNi github.com/antchfx/xquery v0.0.0-20180515051857-ad5b8c7a47b0/go.mod h1:LzD22aAzDP8/dyiCKFp31He4m2GPjl0AFyzDtZzUu9M= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU= github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 h1:MzVXffFUye+ZcSR6opIgz9Co7WcDx6ZcY+RjfFHoA0I= @@ -137,8 +138,10 @@ github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= 
+github.com/bmatcuk/doublestar v1.1.5 h1:2bNwBOmhyFEFcoB3tGvTD5xanq+4kyOZlB8wFYbMjkk= github.com/bmatcuk/doublestar v1.1.5/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= @@ -164,8 +167,10 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= @@ -331,6 +336,7 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0 
h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= @@ -367,6 +373,7 @@ github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuD github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-getter v1.5.2 h1:XDo8LiAcDisiqZdv0TKgz+HtX3WN7zA2JD1R1tjsabE= github.com/hashicorp/go-getter v1.5.2/go.mod h1:orNH3BTYLu/fIxGIdLjLoAJHWMDQ/UKQr5O4m3iBuoo= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= @@ -384,6 +391,7 @@ github.com/hashicorp/go-retryablehttp v0.5.2/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es github.com/hashicorp/go-retryablehttp v0.6.8 h1:92lWxgpa+fF3FozM4B3UZtHZMJX8T5XT+TFdCxsPyWs= github.com/hashicorp/go-retryablehttp v0.6.8/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= github.com/hashicorp/go-slug v0.4.1/go.mod h1:I5tq5Lv0E2xcNXNkmx7BSfzi1PsJ2cNjs3cC3LwyhK8= github.com/hashicorp/go-slug v0.6.0 h1:O0I3/jjkuDpHSvjTtidEeiW1LmO0cd/Zy4f5axfa00g= @@ -407,8 +415,9 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f 
h1:UdxlrJz4JOnY8W+DbLISwf2B8WXEolNRA8BGCwI9jws= github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= github.com/hashicorp/hcl/v2 v2.0.0/go.mod h1:oVVDG71tEinNGYCxinCYadcmKU9bglqW9pV3txagJ90= -github.com/hashicorp/hcl/v2 v2.10.0 h1:1S1UnuhDGlv3gRFV4+0EdwB+znNP5HmcGbIqwnSCByg= github.com/hashicorp/hcl/v2 v2.10.0/go.mod h1:FwWsfWEjyV/CMj8s/gqAuiviY72rJ1/oayI9WftqcKg= +github.com/hashicorp/hcl/v2 v2.12.0 h1:PsYxySWpMD4KPaoJLnsHwtK5Qptvj/4Q6s0t4sUxZf4= +github.com/hashicorp/hcl/v2 v2.12.0/go.mod h1:FwWsfWEjyV/CMj8s/gqAuiviY72rJ1/oayI9WftqcKg= github.com/hashicorp/jsonapi v0.0.0-20210518035559-1e50d74c8db3 h1:mzwkutymYIXR5oQT9YnfbLuuw7LZmksiHKRPUTN5ijo= github.com/hashicorp/jsonapi v0.0.0-20210518035559-1e50d74c8db3/go.mod h1:Yog5+CPEM3c99L1CL2CFCYoSzgWm5vTU58idbRUaLik= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= @@ -522,6 +531,7 @@ github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALr github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.11.2 h1:MiK62aErc3gIiVEtyzKfeOHgW7atJb5g/KNX5m3c2nQ= github.com/klauspost/compress v1.11.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -597,9 +607,11 @@ github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFW github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= 
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-linereader v0.0.0-20190213213312-1b945b3263eb/go.mod h1:OaY7UOoTkkrX3wRwjpYRKafIkkyeD0UtweSHAWWiqQM= github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= @@ -610,7 +622,6 @@ github.com/mitchellh/gox v1.0.1/go.mod h1:ED6BioOGXMswlXa2zxfh/xdd5QhwYliBFn9V18 github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/panicwrap v1.0.0 h1:67zIyVakCIvcs69A0FGfZjBdPleaonSgGlXRSRlb6fE= github.com/mitchellh/panicwrap v1.0.0/go.mod h1:pKvZHwWrZowLUzftuFq7coarnxbBXU4aQh3N0BJOeeA= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= @@ -752,6 +763,7 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1 github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tombuildsstuff/giovanni 
v0.15.1/go.mod h1:0TZugJPEtqzPlMpuJHYfXY6Dq2uLPrXf98D2XQSxNbA= github.com/ugorji/go v0.0.0-20180813092308-00b869d2f4a5/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ= +github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= @@ -775,6 +787,7 @@ github.com/zclconf/go-cty v1.8.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUA github.com/zclconf/go-cty v1.9.0 h1:IgJxw5b4LPXCPeqFjjhLaNEA8NKXMyaEUdAd399acts= github.com/zclconf/go-cty v1.9.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= +github.com/zclconf/go-cty-yaml v1.0.2 h1:dNyg4QLTrv2IfJpm7Wtxi55ed5gLGOlPrZ6kMd51hY0= github.com/zclconf/go-cty-yaml v1.0.2/go.mod h1:IP3Ylp0wQpYm50IHK8OZWKMu6sPJIUgKa8XhiVHura0= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -1002,6 +1015,7 @@ golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef h1:fPxZ3Umkct3LZ8gK9nbk+DWDJ golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf h1:MZ2shdL+ZM/XzY3ZGOnh4Nlpnxz5GSOhOmtHo3iPU6M= golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/internal/terraform/addrs/check.go b/internal/terraform/addrs/check.go new file mode 100644 index 00000000..a50f51cd --- /dev/null +++ b/internal/terraform/addrs/check.go @@ -0,0 +1,86 @@ +package addrs + +import "fmt" + +// Check is the address of a check rule within a checkable object. +// +// This represents the check rule globally within a configuration, and is used +// during graph evaluation to identify a condition result object to update with +// the result of check rule evaluation. +// +// The check address is not distinct from resource traversals, and check rule +// values are not intended to be available to the language, so the address is +// not Referenceable. +// +// Note also that the check address is only relevant within the scope of a run, +// as reordering check blocks between runs will result in their addresses +// changing. +type Check struct { + Container Checkable + Type CheckType + Index int +} + +func (c Check) String() string { + container := c.Container.String() + switch c.Type { + case ResourcePrecondition: + return fmt.Sprintf("%s.preconditions[%d]", container, c.Index) + case ResourcePostcondition: + return fmt.Sprintf("%s.postconditions[%d]", container, c.Index) + case OutputPrecondition: + return fmt.Sprintf("%s.preconditions[%d]", container, c.Index) + default: + // This should not happen + return fmt.Sprintf("%s.conditions[%d]", container, c.Index) + } +} + +// Checkable is an interface implemented by all address types that can contain +// condition blocks. +type Checkable interface { + checkableSigil() + + // Check returns the address of an individual check rule of a specified + // type and index within this checkable container. 
+ Check(CheckType, int) Check + String() string +} + +var ( + _ Checkable = AbsResourceInstance{} + _ Checkable = AbsOutputValue{} +) + +type checkable struct { +} + +func (c checkable) checkableSigil() { +} + +// CheckType describes the category of check. +//go:generate go run golang.org/x/tools/cmd/stringer -type=CheckType check.go +type CheckType int + +const ( + InvalidCondition CheckType = 0 + ResourcePrecondition CheckType = 1 + ResourcePostcondition CheckType = 2 + OutputPrecondition CheckType = 3 +) + +// Description returns a human-readable description of the check type. This is +// presented in the user interface through a diagnostic summary. +func (c CheckType) Description() string { + switch c { + case ResourcePrecondition: + return "Resource precondition" + case ResourcePostcondition: + return "Resource postcondition" + case OutputPrecondition: + return "Module output value precondition" + default: + // This should not happen + return "Condition" + } +} diff --git a/internal/terraform/addrs/checktype_string.go b/internal/terraform/addrs/checktype_string.go new file mode 100644 index 00000000..8c2fcebd --- /dev/null +++ b/internal/terraform/addrs/checktype_string.go @@ -0,0 +1,26 @@ +// Code generated by "stringer -type=CheckType check.go"; DO NOT EDIT. + +package addrs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[InvalidCondition-0] + _ = x[ResourcePrecondition-1] + _ = x[ResourcePostcondition-2] + _ = x[OutputPrecondition-3] +} + +const _CheckType_name = "InvalidConditionResourcePreconditionResourcePostconditionOutputPrecondition" + +var _CheckType_index = [...]uint8{0, 16, 36, 57, 75} + +func (i CheckType) String() string { + if i < 0 || i >= CheckType(len(_CheckType_index)-1) { + return "CheckType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _CheckType_name[_CheckType_index[i]:_CheckType_index[i+1]] +} diff --git a/internal/terraform/addrs/count_attr.go b/internal/terraform/addrs/count_attr.go index 90a5faf0..0be5c026 100644 --- a/internal/terraform/addrs/count_attr.go +++ b/internal/terraform/addrs/count_attr.go @@ -10,3 +10,9 @@ type CountAttr struct { func (ca CountAttr) String() string { return "count." + ca.Name } + +func (ca CountAttr) UniqueKey() UniqueKey { + return ca // A CountAttr is its own UniqueKey +} + +func (ca CountAttr) uniqueKeySigil() {} diff --git a/internal/terraform/addrs/for_each_attr.go b/internal/terraform/addrs/for_each_attr.go index 7a638503..6b0c0609 100644 --- a/internal/terraform/addrs/for_each_attr.go +++ b/internal/terraform/addrs/for_each_attr.go @@ -10,3 +10,9 @@ type ForEachAttr struct { func (f ForEachAttr) String() string { return "each." + f.Name } + +func (f ForEachAttr) UniqueKey() UniqueKey { + return f // A ForEachAttr is its own UniqueKey +} + +func (f ForEachAttr) uniqueKeySigil() {} diff --git a/internal/terraform/addrs/input_variable.go b/internal/terraform/addrs/input_variable.go index 975c72f1..e85743bc 100644 --- a/internal/terraform/addrs/input_variable.go +++ b/internal/terraform/addrs/input_variable.go @@ -14,6 +14,12 @@ func (v InputVariable) String() string { return "var." 
+ v.Name } +func (v InputVariable) UniqueKey() UniqueKey { + return v // A InputVariable is its own UniqueKey +} + +func (v InputVariable) uniqueKeySigil() {} + // Absolute converts the receiver into an absolute address within the given // module instance. func (v InputVariable) Absolute(m ModuleInstance) AbsInputVariableInstance { diff --git a/internal/terraform/addrs/local_value.go b/internal/terraform/addrs/local_value.go index 61a07b9c..60176500 100644 --- a/internal/terraform/addrs/local_value.go +++ b/internal/terraform/addrs/local_value.go @@ -14,6 +14,12 @@ func (v LocalValue) String() string { return "local." + v.Name } +func (v LocalValue) UniqueKey() UniqueKey { + return v // A LocalValue is its own UniqueKey +} + +func (v LocalValue) uniqueKeySigil() {} + // Absolute converts the receiver into an absolute address within the given // module instance. func (v LocalValue) Absolute(m ModuleInstance) AbsLocalValue { diff --git a/internal/terraform/addrs/module.go b/internal/terraform/addrs/module.go index 18f3dbec..4ae72b4e 100644 --- a/internal/terraform/addrs/module.go +++ b/internal/terraform/addrs/module.go @@ -95,6 +95,10 @@ func (m Module) TargetContains(other Targetable) bool { } } +func (m Module) AddrType() TargetableAddrType { + return ModuleAddrType +} + // Child returns the address of a child call in the receiver, identified by the // given name. func (m Module) Child(name string) Module { @@ -146,3 +150,7 @@ func (m Module) Ancestors() []Module { } return ret } + +func (m Module) configMoveableSigil() { + // ModuleInstance is moveable +} diff --git a/internal/terraform/addrs/module_call.go b/internal/terraform/addrs/module_call.go index 02163ef7..709b1e30 100644 --- a/internal/terraform/addrs/module_call.go +++ b/internal/terraform/addrs/module_call.go @@ -6,9 +6,6 @@ import ( // ModuleCall is the address of a call from the current module to a child // module. 
-// -// There is no "Abs" version of ModuleCall because an absolute module path -// is represented by ModuleInstance. type ModuleCall struct { referenceable Name string @@ -18,6 +15,12 @@ func (c ModuleCall) String() string { return "module." + c.Name } +func (c ModuleCall) UniqueKey() UniqueKey { + return c // A ModuleCall is its own UniqueKey +} + +func (c ModuleCall) uniqueKeySigil() {} + // Instance returns the address of an instance of the receiver identified by // the given key. func (c ModuleCall) Instance(key InstanceKey) ModuleCallInstance { @@ -27,9 +30,68 @@ func (c ModuleCall) Instance(key InstanceKey) ModuleCallInstance { } } +func (c ModuleCall) Absolute(moduleAddr ModuleInstance) AbsModuleCall { + return AbsModuleCall{ + Module: moduleAddr, + Call: c, + } +} + +func (c ModuleCall) Equal(other ModuleCall) bool { + return c.Name == other.Name +} + +// AbsModuleCall is the address of a "module" block relative to the root +// of the configuration. +// +// This is similar to ModuleInstance alone, but specifically represents +// the module block itself rather than any one of the instances that +// module block declares. +type AbsModuleCall struct { + Module ModuleInstance + Call ModuleCall +} + +func (c AbsModuleCall) absMoveableSigil() { + // AbsModuleCall is "moveable". +} + +func (c AbsModuleCall) String() string { + if len(c.Module) == 0 { + return "module." 
+ c.Call.Name + + } + return fmt.Sprintf("%s.module.%s", c.Module, c.Call.Name) +} + +func (c AbsModuleCall) Instance(key InstanceKey) ModuleInstance { + ret := make(ModuleInstance, len(c.Module), len(c.Module)+1) + copy(ret, c.Module) + ret = append(ret, ModuleInstanceStep{ + Name: c.Call.Name, + InstanceKey: key, + }) + return ret +} + +func (c AbsModuleCall) Equal(other AbsModuleCall) bool { + return c.Module.Equal(other.Module) && c.Call.Equal(other.Call) +} + +type absModuleCallInstanceKey string + +func (c AbsModuleCall) UniqueKey() UniqueKey { + return absModuleCallInstanceKey(c.String()) +} + +func (mk absModuleCallInstanceKey) uniqueKeySigil() {} + // ModuleCallInstance is the address of one instance of a module created from // a module call, which might create multiple instances using "count" or // "for_each" arguments. +// +// There is no "Abs" version of ModuleCallInstance because an absolute module +// path is represented by ModuleInstance. type ModuleCallInstance struct { referenceable Call ModuleCall @@ -43,6 +105,22 @@ func (c ModuleCallInstance) String() string { return fmt.Sprintf("module.%s%s", c.Call.Name, c.Key) } +func (c ModuleCallInstance) UniqueKey() UniqueKey { + return c // A ModuleCallInstance is its own UniqueKey +} + +func (c ModuleCallInstance) uniqueKeySigil() {} + +func (c ModuleCallInstance) Absolute(moduleAddr ModuleInstance) ModuleInstance { + ret := make(ModuleInstance, len(moduleAddr), len(moduleAddr)+1) + copy(ret, moduleAddr) + ret = append(ret, ModuleInstanceStep{ + Name: c.Call.Name, + InstanceKey: c.Key, + }) + return ret +} + // ModuleInstance returns the address of the module instance that corresponds // to the receiving call instance when resolved in the given calling module. 
// In other words, it returns the child module instance that the receving @@ -53,8 +131,8 @@ func (c ModuleCallInstance) ModuleInstance(caller ModuleInstance) ModuleInstance // Output returns the absolute address of an output of the receiver identified by its // name. -func (c ModuleCallInstance) Output(name string) AbsModuleCallOutput { - return AbsModuleCallOutput{ +func (c ModuleCallInstance) Output(name string) ModuleCallInstanceOutput { + return ModuleCallInstanceOutput{ Call: c, Name: name, } @@ -72,9 +150,15 @@ func (m ModuleCallOutput) String() string { return fmt.Sprintf("%s.%s", m.Call.String(), m.Name) } -// AbsModuleCallOutput is the address of a particular named output produced by +func (m ModuleCallOutput) UniqueKey() UniqueKey { + return m // A ModuleCallOutput is its own UniqueKey +} + +func (m ModuleCallOutput) uniqueKeySigil() {} + +// ModuleCallInstanceOutput is the address of a particular named output produced by // an instance of a module call. -type AbsModuleCallOutput struct { +type ModuleCallInstanceOutput struct { referenceable Call ModuleCallInstance Name string @@ -82,21 +166,27 @@ type AbsModuleCallOutput struct { // ModuleCallOutput returns the referenceable ModuleCallOutput for this // particular instance. -func (co AbsModuleCallOutput) ModuleCallOutput() ModuleCallOutput { +func (co ModuleCallInstanceOutput) ModuleCallOutput() ModuleCallOutput { return ModuleCallOutput{ Call: co.Call.Call, Name: co.Name, } } -func (co AbsModuleCallOutput) String() string { +func (co ModuleCallInstanceOutput) String() string { return fmt.Sprintf("%s.%s", co.Call.String(), co.Name) } +func (co ModuleCallInstanceOutput) UniqueKey() UniqueKey { + return co // A ModuleCallInstanceOutput is its own UniqueKey +} + +func (co ModuleCallInstanceOutput) uniqueKeySigil() {} + // AbsOutputValue returns the absolute output value address that corresponds // to the receving module call output address, once resolved in the given // calling module. 
-func (co AbsModuleCallOutput) AbsOutputValue(caller ModuleInstance) AbsOutputValue { +func (co ModuleCallInstanceOutput) AbsOutputValue(caller ModuleInstance) AbsOutputValue { moduleAddr := co.Call.ModuleInstance(caller) return moduleAddr.OutputValue(co.Name) } diff --git a/internal/terraform/addrs/module_instance.go b/internal/terraform/addrs/module_instance.go index 063a33af..66926f28 100644 --- a/internal/terraform/addrs/module_instance.go +++ b/internal/terraform/addrs/module_instance.go @@ -1,8 +1,8 @@ package addrs import ( - "bytes" "fmt" + "strings" "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclsyntax" @@ -237,6 +237,15 @@ func (m ModuleInstance) Child(name string, key InstanceKey) ModuleInstance { }) } +// ChildCall returns the address of a module call within the receiver, +// identified by the given name. +func (m ModuleInstance) ChildCall(name string) AbsModuleCall { + return AbsModuleCall{ + Module: m, + Call: ModuleCall{Name: name}, + } +} + // Parent returns the address of the parent module instance of the receiver, or // the receiver itself if there is no parent (if it's the root module address). func (m ModuleInstance) Parent() ModuleInstance { @@ -251,7 +260,17 @@ func (m ModuleInstance) Parent() ModuleInstance { // // The address of the root module has the empty string as its representation. func (m ModuleInstance) String() string { - var buf bytes.Buffer + if len(m) == 0 { + return "" + } + // Calculate minimal necessary space (no instance keys). + l := 0 + for _, step := range m { + l += len(step.Name) + } + buf := strings.Builder{} + // 8 is len(".module.") which separates entries. 
+ buf.Grow(l + len(m)*8) sep := "" for _, step := range m { buf.WriteString(sep) @@ -265,6 +284,14 @@ func (m ModuleInstance) String() string { return buf.String() } +type moduleInstanceKey string + +func (m ModuleInstance) UniqueKey() UniqueKey { + return moduleInstanceKey(m.String()) +} + +func (mk moduleInstanceKey) uniqueKeySigil() {} + // Equal returns true if the receiver and the given other value // contains the exact same parts. func (m ModuleInstance) Equal(o ModuleInstance) bool { @@ -474,10 +501,40 @@ func (m ModuleInstance) Module() Module { return ret } +func (m ModuleInstance) AddrType() TargetableAddrType { + return ModuleInstanceAddrType +} + func (m ModuleInstance) targetableSigil() { // ModuleInstance is targetable } +func (m ModuleInstance) absMoveableSigil() { + // ModuleInstance is moveable +} + +// IsDeclaredByCall returns true if the receiver is an instance of the given +// AbsModuleCall. +func (m ModuleInstance) IsDeclaredByCall(other AbsModuleCall) bool { + // Compare len(m) to len(other.Module+1) because the final module instance + // step in other is stored in the AbsModuleCall.Call + if len(m) > len(other.Module)+1 || len(m) == 0 && len(other.Module) == 0 { + return false + } + + // Verify that the other's ModuleInstance matches the receiver. + inst, lastStep := other.Module, other.Call + for i := range inst { + if inst[i] != m[i] { + return false + } + } + + // Now compare the final step of the received with the other Call, where + // only the name needs to match. 
+ return lastStep.Name == m[len(m)-1].Name +} + func (s ModuleInstanceStep) String() string { if s.InstanceKey != NoKey { return s.Name + s.InstanceKey.String() diff --git a/internal/terraform/addrs/module_instance_test.go b/internal/terraform/addrs/module_instance_test.go index c36e9952..393bcd57 100644 --- a/internal/terraform/addrs/module_instance_test.go +++ b/internal/terraform/addrs/module_instance_test.go @@ -77,3 +77,94 @@ func TestModuleInstanceEqual_false(t *testing.T) { }) } } + +func BenchmarkStringShort(b *testing.B) { + addr, _ := ParseModuleInstanceStr(`module.foo`) + for n := 0; n < b.N; n++ { + addr.String() + } +} + +func BenchmarkStringLong(b *testing.B) { + addr, _ := ParseModuleInstanceStr(`module.southamerica-brazil-region.module.user-regional-desktops.module.user-name`) + for n := 0; n < b.N; n++ { + addr.String() + } +} + +func TestModuleInstance_IsDeclaredByCall(t *testing.T) { + tests := []struct { + instance ModuleInstance + call AbsModuleCall + want bool + }{ + { + ModuleInstance{}, + AbsModuleCall{}, + false, + }, + { + mustParseModuleInstanceStr("module.child"), + AbsModuleCall{}, + false, + }, + { + ModuleInstance{}, + AbsModuleCall{ + RootModuleInstance, + ModuleCall{Name: "child"}, + }, + false, + }, + { + mustParseModuleInstanceStr("module.child"), + AbsModuleCall{ // module.child + RootModuleInstance, + ModuleCall{Name: "child"}, + }, + true, + }, + { + mustParseModuleInstanceStr(`module.child`), + AbsModuleCall{ // module.kinder.module.child + mustParseModuleInstanceStr("module.kinder"), + ModuleCall{Name: "child"}, + }, + false, + }, + { + mustParseModuleInstanceStr("module.kinder"), + // module.kinder.module.child contains module.kinder, but is not itself an instance of module.kinder + AbsModuleCall{ + mustParseModuleInstanceStr("module.kinder"), + ModuleCall{Name: "child"}, + }, + false, + }, + { + mustParseModuleInstanceStr("module.child"), + AbsModuleCall{ + mustParseModuleInstanceStr(`module.kinder["a"]`), + 
ModuleCall{Name: "kinder"}, + }, + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%q.IsCallInstance(%q)", test.instance, test.call.String()), func(t *testing.T) { + got := test.instance.IsDeclaredByCall(test.call) + if got != test.want { + t.Fatal("wrong result") + } + }) + } +} + +func mustParseModuleInstanceStr(str string) ModuleInstance { + mi, diags := ParseModuleInstanceStr(str) + if diags.HasErrors() { + panic(diags.ErrWithWarnings()) + } + return mi +} diff --git a/internal/terraform/addrs/module_package.go b/internal/terraform/addrs/module_package.go new file mode 100644 index 00000000..dc5a6621 --- /dev/null +++ b/internal/terraform/addrs/module_package.go @@ -0,0 +1,89 @@ +package addrs + +import ( + "strings" + + svchost "github.com/hashicorp/terraform-svchost" +) + +// A ModulePackage represents a physical location where Terraform can retrieve +// a module package, which is an archive, repository, or other similar +// container which delivers the source code for one or more Terraform modules. +// +// A ModulePackage is a string in go-getter's address syntax. By convention, +// we use ModulePackage-typed values only for the result of successfully +// running the go-getter "detectors", which produces an address string which +// includes an explicit installation method prefix along with an address +// string in the format expected by that installation method. +// +// Note that although the "detector" phase of go-getter does do some simple +// normalization in certain cases, it isn't generally possible to compare +// two ModulePackage values to decide if they refer to the same package. Two +// equal ModulePackage values represent the same package, but there might be +// other non-equal ModulePackage values that also refer to that package, and +// there is no reliable way to determine that. +// +// Don't convert a user-provided string directly to ModulePackage. 
Instead, +// use ParseModuleSource with a remote module address and then access the +// ModulePackage value from the result, making sure to also handle the +// selected subdirectory if any. You should convert directly to ModulePackage +// only for a string that is hard-coded into the program (e.g. in a unit test) +// where you've ensured that it's already in the expected syntax. +type ModulePackage string + +func (p ModulePackage) String() string { + return string(p) +} + +// A ModuleRegistryPackage is an extra indirection over a ModulePackage where +// we use a module registry to translate a more symbolic address (and +// associated version constraint given out of band) into a physical source +// location. +// +// ModuleRegistryPackage is distinct from ModulePackage because they have +// disjoint use-cases: registry package addresses are only used to query a +// registry in order to find a real module package address. These being +// distinct is intended to help future maintainers more easily follow the +// series of steps in the module installer, with the help of the type checker. +type ModuleRegistryPackage struct { + Host svchost.Hostname + Namespace string + Name string + TargetSystem string +} + +func (s ModuleRegistryPackage) String() string { + var buf strings.Builder + // Note: we're using the "display" form of the hostname here because + // for our service hostnames "for display" means something different: + // it means to render non-ASCII characters directly as Unicode + // characters, rather than using the "punycode" representation we + // use for internal processing, and so the "display" representation + // is actually what users would write in their configurations. 
+	buf.WriteString(s.Host.ForDisplay()) +	buf.WriteByte('/') +	buf.WriteString(s.ForRegistryProtocol()) +	return buf.String() +} + +func (s ModuleRegistryPackage) ForDisplay() string { + if s.Host == DefaultModuleRegistryHost { + return s.ForRegistryProtocol() + } + return s.Host.ForDisplay() + "/" + s.ForRegistryProtocol() +} + +// ForRegistryProtocol returns a string representation of just the namespace, +// name, and target system portions of the address, always omitting the +// registry hostname and the subdirectory portion, if any. +// +// This is primarily intended for generating addresses to send to the +// registry in question via the registry protocol, since the protocol +// skips sending the registry its own hostname as part of identifiers. +func (s ModuleRegistryPackage) ForRegistryProtocol() string { + var buf strings.Builder + buf.WriteString(s.Namespace) + buf.WriteByte('/') + buf.WriteString(s.Name) + buf.WriteByte('/') + buf.WriteString(s.TargetSystem) + return buf.String() +} diff --git a/internal/terraform/addrs/module_source.go b/internal/terraform/addrs/module_source.go new file mode 100644 index 00000000..01f3a44f --- /dev/null +++ b/internal/terraform/addrs/module_source.go @@ -0,0 +1,495 @@ +package addrs + +import ( + "fmt" + "path" + "regexp" + "strings" + + "github.com/camptocamp/terraboard/internal/terraform/getmodules" + svchost "github.com/hashicorp/terraform-svchost" +) + +// ModuleSource is the general type for all three of the possible module source +// address types. The concrete implementations of this are ModuleSourceLocal, +// ModuleSourceRegistry, and ModuleSourceRemote. +type ModuleSource interface { + // String returns a full representation of the address, including any + // additional components that are typically implied by omission in + // user-written addresses. + // + // We typically use this longer representation in error message, in case + // the inclusion of normally-omitted components is helpful in debugging + // unexpected behavior. 
+ String() string + + // ForDisplay is similar to String but instead returns a representation of + // the idiomatic way to write the address in configuration, omitting + // components that are commonly just implied in addresses written by + // users. + // + // We typically use this shorter representation in informational messages, + // such as the note that we're about to start downloading a package. + ForDisplay() string + + moduleSource() +} + +var _ ModuleSource = ModuleSourceLocal("") +var _ ModuleSource = ModuleSourceRegistry{} +var _ ModuleSource = ModuleSourceRemote{} + +var moduleSourceLocalPrefixes = []string{ + "./", + "../", + ".\\", + "..\\", +} + +// ParseModuleSource parses a module source address as given in the "source" +// argument inside a "module" block in the configuration. +// +// For historical reasons this syntax is a bit overloaded, supporting three +// different address types: +// - Local paths starting with either ./ or ../, which are special because +// Terraform considers them to belong to the same "package" as the caller. +// - Module registry addresses, given as either NAMESPACE/NAME/SYSTEM or +// HOST/NAMESPACE/NAME/SYSTEM, in which case the remote registry serves +// as an indirection over the third address type that follows. +// - Various URL-like and other heuristically-recognized strings which +// we currently delegate to the external library go-getter. +// +// There is some ambiguity between the module registry addresses and go-getter's +// very liberal heuristics and so this particular function will typically treat +// an invalid registry address as some other sort of remote source address +// rather than returning an error. If you know that you're expecting a +// registry address in particular, use ParseModuleSourceRegistry instead, which +// can therefore expose more detailed error messages about registry address +// parsing in particular. 
+func ParseModuleSource(raw string) (ModuleSource, error) { + if isModuleSourceLocal(raw) { + localAddr, err := parseModuleSourceLocal(raw) + if err != nil { + // This is to make sure we really return a nil ModuleSource in + // this case, rather than an interface containing the zero + // value of ModuleSourceLocal. + return nil, err + } + return localAddr, nil + } + + // For historical reasons, whether an address is a registry + // address is defined only by whether it can be successfully + // parsed as one, and anything else must fall through to be + // parsed as a direct remote source, where go-getter might + // then recognize it as a filesystem path. This is odd + // but matches behavior we've had since Terraform v0.10 which + // existing modules may be relying on. + // (Notice that this means that there's never any path where + // the registry source parse error gets returned to the caller, + // which is annoying but has been true for many releases + // without it posing a serious problem in practice.) + if ret, err := ParseModuleSourceRegistry(raw); err == nil { + return ret, nil + } + + // If we get down here then we treat everything else as a + // remote address. In practice there's very little that + // go-getter doesn't consider invalid input, so even invalid + // nonsense will probably interpreted as _something_ here + // and then fail during installation instead. We can't + // really improve this situation for historical reasons. + remoteAddr, err := parseModuleSourceRemote(raw) + if err != nil { + // This is to make sure we really return a nil ModuleSource in + // this case, rather than an interface containing the zero + // value of ModuleSourceRemote. + return nil, err + } + return remoteAddr, nil +} + +// ModuleSourceLocal is a ModuleSource representing a local path reference +// from the caller's directory to the callee's directory within the same +// module package. 
+// +// A "module package" here means a set of modules distributed together in +// the same archive, repository, or similar. That's a significant distinction +// because we always download and cache entire module packages at once, +// and then create relative references within the same directory in order +// to ensure all modules in the package are looking at a consistent filesystem +// layout. We also assume that modules within a package are maintained together, +// which means that cross-cutting maintenence across all of them would be +// possible. +// +// The actual value of a ModuleSourceLocal is a normalized relative path using +// forward slashes, even on operating systems that have other conventions, +// because we're representing traversal within the logical filesystem +// represented by the containing package, not actually within the physical +// filesystem we unpacked the package into. We should typically not construct +// ModuleSourceLocal values directly, except in tests where we can ensure +// the value meets our assumptions. Use ParseModuleSource instead if the +// input string is not hard-coded in the program. +type ModuleSourceLocal string + +func parseModuleSourceLocal(raw string) (ModuleSourceLocal, error) { + // As long as we have a suitable prefix (detected by ParseModuleSource) + // there is no failure case for local paths: we just use the "path" + // package's cleaning logic to remove any redundant "./" and "../" + // sequences and any duplicate slashes and accept whatever that + // produces. + + // Although using backslashes (Windows-style) is non-idiomatic, we do + // allow it and just normalize it away, so the rest of Terraform will + // only see the forward-slash form. 
+ if strings.Contains(raw, `\`) { + // Note: We use string replacement rather than filepath.ToSlash + // here because the filepath package behavior varies by current + // platform, but we want to interpret configured paths the same + // across all platforms: these are virtual paths within a module + // package, not physical filesystem paths. + raw = strings.ReplaceAll(raw, `\`, "/") + } + + // Note that we could've historically blocked using "//" in a path here + // in order to avoid confusion with the subdir syntax in remote addresses, + // but we historically just treated that as the same as a single slash + // and so we continue to do that now for compatibility. Clean strips those + // out and reduces them to just a single slash. + clean := path.Clean(raw) + + // However, we do need to keep a single "./" on the front if it isn't + // a "../" path, or else it would be ambigous with the registry address + // syntax. + if !strings.HasPrefix(clean, "../") { + clean = "./" + clean + } + + return ModuleSourceLocal(clean), nil +} + +func isModuleSourceLocal(raw string) bool { + for _, prefix := range moduleSourceLocalPrefixes { + if strings.HasPrefix(raw, prefix) { + return true + } + } + return false +} + +func (s ModuleSourceLocal) moduleSource() {} + +func (s ModuleSourceLocal) String() string { + // We assume that our underlying string was already normalized at + // construction, so we just return it verbatim. + return string(s) +} + +func (s ModuleSourceLocal) ForDisplay() string { + return string(s) +} + +// ModuleSourceRegistry is a ModuleSource representing a module listed in a +// Terraform module registry. +// +// A registry source isn't a direct source location but rather an indirection +// over a ModuleSourceRemote. The job of a registry is to translate the +// combination of a ModuleSourceRegistry and a module version number into +// a concrete ModuleSourceRemote that Terraform will then download and +// install. 
+type ModuleSourceRegistry struct { + // PackageAddr is the registry package that the target module belongs to. + // The module installer must translate this into a ModuleSourceRemote + // using the registry API and then take that underlying address's + // PackageAddr in order to find the actual package location. + PackageAddr ModuleRegistryPackage + + // If Subdir is non-empty then it represents a sub-directory within the + // remote package that the registry address eventually resolves to. + // This will ultimately become the suffix of the Subdir of the + // ModuleSourceRemote that the registry address translates to. + // + // Subdir uses a normalized forward-slash-based path syntax within the + // virtual filesystem represented by the final package. It will never + // include `../` or `./` sequences. + Subdir string +} + +// DefaultModuleRegistryHost is the hostname used for registry-based module +// source addresses that do not have an explicit hostname. +const DefaultModuleRegistryHost = svchost.Hostname("registry.terraform.io") + +var moduleRegistryNamePattern = regexp.MustCompile("^[0-9A-Za-z](?:[0-9A-Za-z-_]{0,62}[0-9A-Za-z])?$") +var moduleRegistryTargetSystemPattern = regexp.MustCompile("^[0-9a-z]{1,64}$") + +// ParseModuleSourceRegistry is a variant of ParseModuleSource which only +// accepts module registry addresses, and will reject any other address type. +// +// Use this instead of ParseModuleSource if you know from some other surrounding +// context that an address is intended to be a registry address rather than +// some other address type, which will then allow for better error reporting +// due to the additional information about user intent. +func ParseModuleSourceRegistry(raw string) (ModuleSource, error) { + // Before we delegate to the "real" function we'll just make sure this + // doesn't look like a local source address, so we can return a better + // error message for that situation. 
+ if isModuleSourceLocal(raw) { + return ModuleSourceRegistry{}, fmt.Errorf("can't use local directory %q as a module registry address", raw) + } + + ret, err := parseModuleSourceRegistry(raw) + if err != nil { + // This is to make sure we return a nil ModuleSource, rather than + // a non-nil ModuleSource containing a zero-value ModuleSourceRegistry. + return nil, err + } + return ret, nil +} + +func parseModuleSourceRegistry(raw string) (ModuleSourceRegistry, error) { + var err error + + var subDir string + raw, subDir = getmodules.SplitPackageSubdir(raw) + if strings.HasPrefix(subDir, "../") { + return ModuleSourceRegistry{}, fmt.Errorf("subdirectory path %q leads outside of the module package", subDir) + } + + parts := strings.Split(raw, "/") + // A valid registry address has either three or four parts, because the + // leading hostname part is optional. + if len(parts) != 3 && len(parts) != 4 { + return ModuleSourceRegistry{}, fmt.Errorf("a module registry source address must have either three or four slash-separated components") + } + + host := DefaultModuleRegistryHost + if len(parts) == 4 { + host, err = svchost.ForComparison(parts[0]) + if err != nil { + // The svchost library doesn't produce very good error messages to + // return to an end-user, so we'll use some custom ones here. + switch { + case strings.Contains(parts[0], "--"): + // Looks like possibly punycode, which we don't allow here + // to ensure that source addresses are written readably. 
+ return ModuleSourceRegistry{}, fmt.Errorf("invalid module registry hostname %q; internationalized domain names must be given as direct unicode characters, not in punycode", parts[0]) + default: + return ModuleSourceRegistry{}, fmt.Errorf("invalid module registry hostname %q", parts[0]) + } + } + if !strings.Contains(host.String(), ".") { + return ModuleSourceRegistry{}, fmt.Errorf("invalid module registry hostname: must contain at least one dot") + } + // Discard the hostname prefix now that we've processed it + parts = parts[1:] + } + + ret := ModuleSourceRegistry{ + PackageAddr: ModuleRegistryPackage{ + Host: host, + }, + + Subdir: subDir, + } + + if host == svchost.Hostname("github.com") || host == svchost.Hostname("bitbucket.org") { + return ret, fmt.Errorf("can't use %q as a module registry host, because it's reserved for installing directly from version control repositories", host) + } + + if ret.PackageAddr.Namespace, err = parseModuleRegistryName(parts[0]); err != nil { + if strings.Contains(parts[0], ".") { + // Seems like the user omitted one of the latter components in + // an address with an explicit hostname. + return ret, fmt.Errorf("source address must have three more components after the hostname: the namespace, the name, and the target system") + } + return ret, fmt.Errorf("invalid namespace %q: %s", parts[0], err) + } + if ret.PackageAddr.Name, err = parseModuleRegistryName(parts[1]); err != nil { + return ret, fmt.Errorf("invalid module name %q: %s", parts[1], err) + } + if ret.PackageAddr.TargetSystem, err = parseModuleRegistryTargetSystem(parts[2]); err != nil { + if strings.Contains(parts[2], "?") { + // The user was trying to include a query string, probably? 
+ return ret, fmt.Errorf("module registry addresses may not include a query string portion") + } + return ret, fmt.Errorf("invalid target system %q: %s", parts[2], err) + } + + return ret, nil +} + +// parseModuleRegistryName validates and normalizes a string in either the +// "namespace" or "name" position of a module registry source address. +func parseModuleRegistryName(given string) (string, error) { + // Similar to the names in provider source addresses, we defined these + // to be compatible with what filesystems and typical remote systems + // like GitHub allow in names. Unfortunately we didn't end up defining + // these exactly equivalently: provider names can only use dashes as + // punctuation, whereas module names can use underscores. So here we're + // using some regular expressions from the original module source + // implementation, rather than using the IDNA rules as we do in + // ParseProviderPart. + + if !moduleRegistryNamePattern.MatchString(given) { + return "", fmt.Errorf("must be between one and 64 characters, including ASCII letters, digits, dashes, and underscores, where dashes and underscores may not be the prefix or suffix") + } + + // We also skip normalizing the name to lowercase, because we historically + // didn't do that and so existing module registries might be doing + // case-sensitive matching. + return given, nil +} + +// parseModuleRegistryTargetSystem validates and normalizes a string in the +// "target system" position of a module registry source address. This is +// what we historically called "provider" but never actually enforced as +// being a provider address, and now _cannot_ be a provider address because +// provider addresses have three slash-separated components of their own. +func parseModuleRegistryTargetSystem(given string) (string, error) { + // Similar to the names in provider source addresses, we defined these + // to be compatible with what filesystems and typical remote systems + // like GitHub allow in names. 
Unfortunately we didn't end up defining + // these exactly equivalently: provider names can't use dashes or + // underscores. So here we're using some regular expressions from the + // original module source implementation, rather than using the IDNA rules + // as we do in ParseProviderPart. + + if !moduleRegistryTargetSystemPattern.MatchString(given) { + return "", fmt.Errorf("must be between one and 64 ASCII letters or digits") + } + + // We also skip normalizing the name to lowercase, because we historically + // didn't do that and so existing module registries might be doing + // case-sensitive matching. + return given, nil +} + +func (s ModuleSourceRegistry) moduleSource() {} + +func (s ModuleSourceRegistry) String() string { + if s.Subdir != "" { + return s.PackageAddr.String() + "//" + s.Subdir + } + return s.PackageAddr.String() +} + +func (s ModuleSourceRegistry) ForDisplay() string { + if s.Subdir != "" { + return s.PackageAddr.ForDisplay() + "//" + s.Subdir + } + return s.PackageAddr.ForDisplay() +} + +// ModuleSourceRemote is a ModuleSource representing a remote location from +// which we can retrieve a module package. +// +// A ModuleSourceRemote can optionally include a "subdirectory" path, which +// means that it's selecting a sub-directory of the given package to use as +// the entry point into the package. +type ModuleSourceRemote struct { + // PackageAddr is the address of the remote package that the requested + // module belongs to. + PackageAddr ModulePackage + + // If Subdir is non-empty then it represents a sub-directory within the + // remote package which will serve as the entry-point for the package. + // + // Subdir uses a normalized forward-slash-based path syntax within the + // virtual filesystem represented by the final package. It will never + // include `../` or `./` sequences. 
+ Subdir string +} + +func parseModuleSourceRemote(raw string) (ModuleSourceRemote, error) { + var subDir string + raw, subDir = getmodules.SplitPackageSubdir(raw) + if strings.HasPrefix(subDir, "../") { + return ModuleSourceRemote{}, fmt.Errorf("subdirectory path %q leads outside of the module package", subDir) + } + + // A remote source address is really just a go-getter address resulting + // from go-getter's "detect" phase, which adds on the prefix specifying + // which protocol it should use and possibly also adjusts the + // protocol-specific part into different syntax. + // + // Note that for historical reasons this can potentially do network + // requests in order to disambiguate certain address types, although + // that's a legacy thing that is only for some specific, less-commonly-used + // address types. Most just do local string manipulation. We should + // aim to remove the network requests over time, if possible. + norm, moreSubDir, err := getmodules.NormalizePackageAddress(raw) + if err != nil { + // We must pass through the returned error directly here because + // the getmodules package has some special error types it uses + // for certain cases where the UI layer might want to include a + // more helpful error message. + return ModuleSourceRemote{}, err + } + + if moreSubDir != "" { + switch { + case subDir != "": + // The detector's own subdir goes first, because the + // subdir we were given is conceptually relative to + // the subdirectory that we just detected. + subDir = path.Join(moreSubDir, subDir) + default: + subDir = path.Clean(moreSubDir) + } + if strings.HasPrefix(subDir, "../") { + // This would suggest a bug in a go-getter detector, but + // we'll catch it anyway to avoid doing something confusing + // downstream. 
+ return ModuleSourceRemote{}, fmt.Errorf("detected subdirectory path %q of %q leads outside of the module package", subDir, norm) + } + } + + return ModuleSourceRemote{ + PackageAddr: ModulePackage(norm), + Subdir: subDir, + }, nil +} + +func (s ModuleSourceRemote) moduleSource() {} + +func (s ModuleSourceRemote) String() string { + if s.Subdir != "" { + return s.PackageAddr.String() + "//" + s.Subdir + } + return s.PackageAddr.String() +} + +func (s ModuleSourceRemote) ForDisplay() string { + // The two string representations are identical for this address type. + // This isn't really entirely true to the idea of "ForDisplay" since + // it'll often include some additional components added in by the + // go-getter detectors, but we don't have any function to turn a + // "detected" string back into an idiomatic shorthand the user might've + // entered. + return s.String() +} + +// FromRegistry can be called on a remote source address that was returned +// from a module registry, passing in the original registry source address +// that the registry was asked about, in order to get the effective final +// remote source address. +// +// Specifically, this method handles the situations where one or both of +// the two addresses contain subdirectory paths, combining both when necessary +// in order to ensure that both the registry's given path and the user's +// given path are both respected. +// +// This will return nonsense if given a registry address other than the one +// that generated the reciever via a registry lookup. 
+func (s ModuleSourceRemote) FromRegistry(given ModuleSourceRegistry) ModuleSourceRemote { + ret := s // not a pointer, so this is a shallow copy + + switch { + case s.Subdir != "" && given.Subdir != "": + ret.Subdir = path.Join(s.Subdir, given.Subdir) + case given.Subdir != "": + ret.Subdir = given.Subdir + } + + return ret +} diff --git a/internal/terraform/addrs/module_source_test.go b/internal/terraform/addrs/module_source_test.go new file mode 100644 index 00000000..9604c337 --- /dev/null +++ b/internal/terraform/addrs/module_source_test.go @@ -0,0 +1,573 @@ +package addrs + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + svchost "github.com/hashicorp/terraform-svchost" +) + +func TestParseModuleSource(t *testing.T) { + tests := map[string]struct { + input string + want ModuleSource + wantErr string + }{ + // Local paths + "local in subdirectory": { + input: "./child", + want: ModuleSourceLocal("./child"), + }, + "local in subdirectory non-normalized": { + input: "./nope/../child", + want: ModuleSourceLocal("./child"), + }, + "local in sibling directory": { + input: "../sibling", + want: ModuleSourceLocal("../sibling"), + }, + "local in sibling directory non-normalized": { + input: "./nope/../../sibling", + want: ModuleSourceLocal("../sibling"), + }, + "Windows-style local in subdirectory": { + input: `.\child`, + want: ModuleSourceLocal("./child"), + }, + "Windows-style local in subdirectory non-normalized": { + input: `.\nope\..\child`, + want: ModuleSourceLocal("./child"), + }, + "Windows-style local in sibling directory": { + input: `..\sibling`, + want: ModuleSourceLocal("../sibling"), + }, + "Windows-style local in sibling directory non-normalized": { + input: `.\nope\..\..\sibling`, + want: ModuleSourceLocal("../sibling"), + }, + "an abominable mix of different slashes": { + input: `./nope\nope/why\./please\don't`, + want: ModuleSourceLocal("./nope/nope/why/please/don't"), + }, + + // Registry addresses + // (NOTE: There is another test 
function TestParseModuleSourceRegistry + // which tests this situation more exhaustively, so this is just a + // token set of cases to see that we are indeed calling into the + // registry address parser when appropriate.) + "main registry implied": { + input: "hashicorp/subnets/cidr", + want: ModuleSourceRegistry{ + PackageAddr: ModuleRegistryPackage{ + Host: svchost.Hostname("registry.terraform.io"), + Namespace: "hashicorp", + Name: "subnets", + TargetSystem: "cidr", + }, + Subdir: "", + }, + }, + "main registry implied, subdir": { + input: "hashicorp/subnets/cidr//examples/foo", + want: ModuleSourceRegistry{ + PackageAddr: ModuleRegistryPackage{ + Host: svchost.Hostname("registry.terraform.io"), + Namespace: "hashicorp", + Name: "subnets", + TargetSystem: "cidr", + }, + Subdir: "examples/foo", + }, + }, + "main registry implied, escaping subdir": { + input: "hashicorp/subnets/cidr//../nope", + // NOTE: This error is actually being caught by the _remote package_ + // address parser, because any registry parsing failure falls back + // to that but both of them have the same subdir validation. This + // case is here to make sure that stays true, so we keep reporting + // a suitable error when the user writes a registry-looking thing. 
+ wantErr: `subdirectory path "../nope" leads outside of the module package`, + }, + "custom registry": { + input: "example.com/awesomecorp/network/happycloud", + want: ModuleSourceRegistry{ + PackageAddr: ModuleRegistryPackage{ + Host: svchost.Hostname("example.com"), + Namespace: "awesomecorp", + Name: "network", + TargetSystem: "happycloud", + }, + Subdir: "", + }, + }, + "custom registry, subdir": { + input: "example.com/awesomecorp/network/happycloud//examples/foo", + want: ModuleSourceRegistry{ + PackageAddr: ModuleRegistryPackage{ + Host: svchost.Hostname("example.com"), + Namespace: "awesomecorp", + Name: "network", + TargetSystem: "happycloud", + }, + Subdir: "examples/foo", + }, + }, + + // Remote package addresses + "github.com shorthand": { + input: "github.com/hashicorp/terraform-cidr-subnets", + want: ModuleSourceRemote{ + PackageAddr: ModulePackage("git::https://github.com/hashicorp/terraform-cidr-subnets.git"), + }, + }, + "github.com shorthand, subdir": { + input: "github.com/hashicorp/terraform-cidr-subnets//example/foo", + want: ModuleSourceRemote{ + PackageAddr: ModulePackage("git::https://github.com/hashicorp/terraform-cidr-subnets.git"), + Subdir: "example/foo", + }, + }, + "git protocol, URL-style": { + input: "git://example.com/code/baz.git", + want: ModuleSourceRemote{ + PackageAddr: ModulePackage("git://example.com/code/baz.git"), + }, + }, + "git protocol, URL-style, subdir": { + input: "git://example.com/code/baz.git//bleep/bloop", + want: ModuleSourceRemote{ + PackageAddr: ModulePackage("git://example.com/code/baz.git"), + Subdir: "bleep/bloop", + }, + }, + "git over HTTPS, URL-style": { + input: "git::https://example.com/code/baz.git", + want: ModuleSourceRemote{ + PackageAddr: ModulePackage("git::https://example.com/code/baz.git"), + }, + }, + "git over HTTPS, URL-style, subdir": { + input: "git::https://example.com/code/baz.git//bleep/bloop", + want: ModuleSourceRemote{ + PackageAddr: 
ModulePackage("git::https://example.com/code/baz.git"), + Subdir: "bleep/bloop", + }, + }, + "git over SSH, URL-style": { + input: "git::ssh://git@example.com/code/baz.git", + want: ModuleSourceRemote{ + PackageAddr: ModulePackage("git::ssh://git@example.com/code/baz.git"), + }, + }, + "git over SSH, URL-style, subdir": { + input: "git::ssh://git@example.com/code/baz.git//bleep/bloop", + want: ModuleSourceRemote{ + PackageAddr: ModulePackage("git::ssh://git@example.com/code/baz.git"), + Subdir: "bleep/bloop", + }, + }, + "git over SSH, scp-style": { + input: "git::git@example.com:code/baz.git", + want: ModuleSourceRemote{ + // Normalized to URL-style + PackageAddr: ModulePackage("git::ssh://git@example.com/code/baz.git"), + }, + }, + "git over SSH, scp-style, subdir": { + input: "git::git@example.com:code/baz.git//bleep/bloop", + want: ModuleSourceRemote{ + // Normalized to URL-style + PackageAddr: ModulePackage("git::ssh://git@example.com/code/baz.git"), + Subdir: "bleep/bloop", + }, + }, + + // NOTE: We intentionally don't test the bitbucket.org shorthands + // here, because that detector makes direct HTTP tequests to the + // Bitbucket API and thus isn't appropriate for unit testing. 
+ + "Google Cloud Storage bucket implied, path prefix": { + input: "www.googleapis.com/storage/v1/BUCKET_NAME/PATH_TO_MODULE", + want: ModuleSourceRemote{ + PackageAddr: ModulePackage("gcs::https://www.googleapis.com/storage/v1/BUCKET_NAME/PATH_TO_MODULE"), + }, + }, + "Google Cloud Storage bucket, path prefix": { + input: "gcs::https://www.googleapis.com/storage/v1/BUCKET_NAME/PATH_TO_MODULE", + want: ModuleSourceRemote{ + PackageAddr: ModulePackage("gcs::https://www.googleapis.com/storage/v1/BUCKET_NAME/PATH_TO_MODULE"), + }, + }, + "Google Cloud Storage bucket implied, archive object": { + input: "www.googleapis.com/storage/v1/BUCKET_NAME/PATH/TO/module.zip", + want: ModuleSourceRemote{ + PackageAddr: ModulePackage("gcs::https://www.googleapis.com/storage/v1/BUCKET_NAME/PATH/TO/module.zip"), + }, + }, + "Google Cloud Storage bucket, archive object": { + input: "gcs::https://www.googleapis.com/storage/v1/BUCKET_NAME/PATH/TO/module.zip", + want: ModuleSourceRemote{ + PackageAddr: ModulePackage("gcs::https://www.googleapis.com/storage/v1/BUCKET_NAME/PATH/TO/module.zip"), + }, + }, + + "Amazon S3 bucket implied, archive object": { + input: "s3-eu-west-1.amazonaws.com/examplecorp-terraform-modules/vpc.zip", + want: ModuleSourceRemote{ + PackageAddr: ModulePackage("s3::https://s3-eu-west-1.amazonaws.com/examplecorp-terraform-modules/vpc.zip"), + }, + }, + "Amazon S3 bucket, archive object": { + input: "s3::https://s3-eu-west-1.amazonaws.com/examplecorp-terraform-modules/vpc.zip", + want: ModuleSourceRemote{ + PackageAddr: ModulePackage("s3::https://s3-eu-west-1.amazonaws.com/examplecorp-terraform-modules/vpc.zip"), + }, + }, + + "HTTP URL": { + input: "http://example.com/module", + want: ModuleSourceRemote{ + PackageAddr: ModulePackage("http://example.com/module"), + }, + }, + "HTTPS URL": { + input: "https://example.com/module", + want: ModuleSourceRemote{ + PackageAddr: ModulePackage("https://example.com/module"), + }, + }, + "HTTPS URL, archive file": { + input: 
"https://example.com/module.zip", + want: ModuleSourceRemote{ + PackageAddr: ModulePackage("https://example.com/module.zip"), + }, + }, + "HTTPS URL, forced archive file": { + input: "https://example.com/module?archive=tar", + want: ModuleSourceRemote{ + PackageAddr: ModulePackage("https://example.com/module?archive=tar"), + }, + }, + "HTTPS URL, forced archive file and checksum": { + input: "https://example.com/module?archive=tar&checksum=blah", + want: ModuleSourceRemote{ + // The query string only actually gets processed when we finally + // do the get, so "checksum=blah" is accepted as valid up + // at this parsing layer. + PackageAddr: ModulePackage("https://example.com/module?archive=tar&checksum=blah"), + }, + }, + + "absolute filesystem path": { + // Although a local directory isn't really "remote", we do + // treat it as such because we still need to do all of the same + // high-level steps to work with these, even though "downloading" + // is replaced by a deep filesystem copy instead. + input: "/tmp/foo/example", + want: ModuleSourceRemote{ + PackageAddr: ModulePackage("file:///tmp/foo/example"), + }, + }, + "absolute filesystem path, subdir": { + // This is a funny situation where the user wants to use a + // directory elsewhere on their system as a package containing + // multiple modules, but the entry point is not at the root + // of that subtree, and so they can use the usual subdir + // syntax to move the package root higher in the real filesystem. 
+ input: "/tmp/foo//example", + want: ModuleSourceRemote{ + PackageAddr: ModulePackage("file:///tmp/foo"), + Subdir: "example", + }, + }, + + "subdir escaping out of package": { + // This is general logic for all subdir regardless of installation + // protocol, but we're using a filesystem path here just as an + // easy placeholder/ + input: "/tmp/foo//example/../../invalid", + wantErr: `subdirectory path "../invalid" leads outside of the module package`, + }, + + "relative path without the needed prefix": { + input: "boop/bloop", + // For this case we return a generic error message from the addrs + // layer, but using a specialized error type which our module + // installer checks for and produces an extra hint for users who + // were intending to write a local path which then got + // misinterpreted as a remote source due to the missing prefix. + // However, the main message is generic here because this is really + // just a general "this string doesn't match any of our source + // address patterns" situation, not _necessarily_ about relative + // local paths. + wantErr: `Terraform cannot detect a supported external module source type for boop/bloop`, + }, + + "go-getter will accept all sorts of garbage": { + input: "dfgdfgsd:dgfhdfghdfghdfg/dfghdfghdfg", + want: ModuleSourceRemote{ + // Unfortunately go-getter doesn't actually reject a totally + // invalid address like this until getting time, as long as + // it looks somewhat like a URL. 
+ PackageAddr: ModulePackage("dfgdfgsd:dgfhdfghdfghdfg/dfghdfghdfg"), + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + addr, err := ParseModuleSource(test.input) + + if test.wantErr != "" { + switch { + case err == nil: + t.Errorf("unexpected success\nwant error: %s", test.wantErr) + case err.Error() != test.wantErr: + t.Errorf("wrong error messages\ngot: %s\nwant: %s", err.Error(), test.wantErr) + } + return + } + + if err != nil { + t.Fatalf("unexpected error: %s", err.Error()) + } + + if diff := cmp.Diff(addr, test.want); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + } + +} + +func TestModuleSourceRemoteFromRegistry(t *testing.T) { + t.Run("both have subdir", func(t *testing.T) { + remote := ModuleSourceRemote{ + PackageAddr: ModulePackage("boop"), + Subdir: "foo", + } + registry := ModuleSourceRegistry{ + Subdir: "bar", + } + gotAddr := remote.FromRegistry(registry) + if remote.Subdir != "foo" { + t.Errorf("FromRegistry modified the reciever; should be pure function") + } + if registry.Subdir != "bar" { + t.Errorf("FromRegistry modified the given address; should be pure function") + } + if got, want := gotAddr.Subdir, "foo/bar"; got != want { + t.Errorf("wrong resolved subdir\ngot: %s\nwant: %s", got, want) + } + }) + t.Run("only remote has subdir", func(t *testing.T) { + remote := ModuleSourceRemote{ + PackageAddr: ModulePackage("boop"), + Subdir: "foo", + } + registry := ModuleSourceRegistry{ + Subdir: "", + } + gotAddr := remote.FromRegistry(registry) + if remote.Subdir != "foo" { + t.Errorf("FromRegistry modified the reciever; should be pure function") + } + if registry.Subdir != "" { + t.Errorf("FromRegistry modified the given address; should be pure function") + } + if got, want := gotAddr.Subdir, "foo"; got != want { + t.Errorf("wrong resolved subdir\ngot: %s\nwant: %s", got, want) + } + }) + t.Run("only registry has subdir", func(t *testing.T) { + remote := ModuleSourceRemote{ + PackageAddr: 
ModulePackage("boop"), + Subdir: "", + } + registry := ModuleSourceRegistry{ + Subdir: "bar", + } + gotAddr := remote.FromRegistry(registry) + if remote.Subdir != "" { + t.Errorf("FromRegistry modified the reciever; should be pure function") + } + if registry.Subdir != "bar" { + t.Errorf("FromRegistry modified the given address; should be pure function") + } + if got, want := gotAddr.Subdir, "bar"; got != want { + t.Errorf("wrong resolved subdir\ngot: %s\nwant: %s", got, want) + } + }) +} + +func TestParseModuleSourceRegistry(t *testing.T) { + // We test parseModuleSourceRegistry alone here, in addition to testing + // it indirectly as part of TestParseModuleSource, because general + // module parsing unfortunately eats all of the error situations from + // registry passing by falling back to trying for a direct remote package + // address. + + // Historical note: These test cases were originally derived from the + // ones in the old internal/registry/regsrc package that the + // ModuleSourceRegistry type is replacing. That package had the notion + // of "normalized" addresses as separate from the original user input, + // but this new implementation doesn't try to preserve the original + // user input at all, and so the main string output is always normalized. + // + // That package also had some behaviors to turn the namespace, name, and + // remote system portions into lowercase, but apparently we didn't + // actually make use of that in the end and were preserving the case + // the user provided in the input, and so for backward compatibility + // we're continuing to do that here, at the expense of now making the + // "ForDisplay" output case-preserving where its predecessor in the + // old package wasn't. 
The main Terraform Registry at registry.terraform.io + // is itself case-insensitive anyway, so our case-preserving here is + // entirely for the benefit of existing third-party registry + // implementations that might be case-sensitive, which we must remain + // compatible with now. + + tests := map[string]struct { + input string + wantString string + wantForDisplay string + wantForProtocol string + wantErr string + }{ + "public registry": { + input: `hashicorp/consul/aws`, + wantString: `registry.terraform.io/hashicorp/consul/aws`, + wantForDisplay: `hashicorp/consul/aws`, + wantForProtocol: `hashicorp/consul/aws`, + }, + "public registry with subdir": { + input: `hashicorp/consul/aws//foo`, + wantString: `registry.terraform.io/hashicorp/consul/aws//foo`, + wantForDisplay: `hashicorp/consul/aws//foo`, + wantForProtocol: `hashicorp/consul/aws`, + }, + "public registry using explicit hostname": { + input: `registry.terraform.io/hashicorp/consul/aws`, + wantString: `registry.terraform.io/hashicorp/consul/aws`, + wantForDisplay: `hashicorp/consul/aws`, + wantForProtocol: `hashicorp/consul/aws`, + }, + "public registry with mixed case names": { + input: `HashiCorp/Consul/aws`, + wantString: `registry.terraform.io/HashiCorp/Consul/aws`, + wantForDisplay: `HashiCorp/Consul/aws`, + wantForProtocol: `HashiCorp/Consul/aws`, + }, + "private registry with non-standard port": { + input: `Example.com:1234/HashiCorp/Consul/aws`, + wantString: `example.com:1234/HashiCorp/Consul/aws`, + wantForDisplay: `example.com:1234/HashiCorp/Consul/aws`, + wantForProtocol: `HashiCorp/Consul/aws`, + }, + "private registry with IDN hostname": { + input: `Испытание.com/HashiCorp/Consul/aws`, + wantString: `испытание.com/HashiCorp/Consul/aws`, + wantForDisplay: `испытание.com/HashiCorp/Consul/aws`, + wantForProtocol: `HashiCorp/Consul/aws`, + }, + "private registry with IDN hostname and non-standard port": { + input: `Испытание.com:1234/HashiCorp/Consul/aws//Foo`, + wantString: 
`испытание.com:1234/HashiCorp/Consul/aws//Foo`, + wantForDisplay: `испытание.com:1234/HashiCorp/Consul/aws//Foo`, + wantForProtocol: `HashiCorp/Consul/aws`, + }, + "invalid hostname": { + input: `---.com/HashiCorp/Consul/aws`, + wantErr: `invalid module registry hostname "---.com"; internationalized domain names must be given as direct unicode characters, not in punycode`, + }, + "hostname with only one label": { + // This was historically forbidden in our initial implementation, + // so we keep it forbidden to avoid newly interpreting such + // addresses as registry addresses rather than remote source + // addresses. + input: `foo/var/baz/qux`, + wantErr: `invalid module registry hostname: must contain at least one dot`, + }, + "invalid target system characters": { + input: `foo/var/no-no-no`, + wantErr: `invalid target system "no-no-no": must be between one and 64 ASCII letters or digits`, + }, + "invalid target system length": { + input: `foo/var/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaah`, + wantErr: `invalid target system "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaah": must be between one and 64 ASCII letters or digits`, + }, + "invalid namespace": { + input: `boop!/var/baz`, + wantErr: `invalid namespace "boop!": must be between one and 64 characters, including ASCII letters, digits, dashes, and underscores, where dashes and underscores may not be the prefix or suffix`, + }, + "missing part with explicit hostname": { + input: `foo.com/var/baz`, + wantErr: `source address must have three more components after the hostname: the namespace, the name, and the target system`, + }, + "errant query string": { + input: `foo/var/baz?otherthing`, + wantErr: `module registry addresses may not include a query string portion`, + }, + "github.com": { + // We don't allow using github.com like a module registry because + // that 
conflicts with the historically-supported shorthand for + // installing directly from GitHub-hosted git repositories. + input: `github.com/HashiCorp/Consul/aws`, + wantErr: `can't use "github.com" as a module registry host, because it's reserved for installing directly from version control repositories`, + }, + "bitbucket.org": { + // We don't allow using bitbucket.org like a module registry because + // that conflicts with the historically-supported shorthand for + // installing directly from BitBucket-hosted git repositories. + input: `bitbucket.org/HashiCorp/Consul/aws`, + wantErr: `can't use "bitbucket.org" as a module registry host, because it's reserved for installing directly from version control repositories`, + }, + "local path from current dir": { + // Can't use a local path when we're specifically trying to parse + // a _registry_ source address. + input: `./boop`, + wantErr: `can't use local directory "./boop" as a module registry address`, + }, + "local path from parent dir": { + // Can't use a local path when we're specifically trying to parse + // a _registry_ source address. 
+ input: `../boop`, + wantErr: `can't use local directory "../boop" as a module registry address`, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + addrI, err := ParseModuleSourceRegistry(test.input) + + if test.wantErr != "" { + switch { + case err == nil: + t.Errorf("unexpected success\nwant error: %s", test.wantErr) + case err.Error() != test.wantErr: + t.Errorf("wrong error messages\ngot: %s\nwant: %s", err.Error(), test.wantErr) + } + return + } + + if err != nil { + t.Fatalf("unexpected error: %s", err.Error()) + } + + addr, ok := addrI.(ModuleSourceRegistry) + if !ok { + t.Fatalf("wrong address type %T; want %T", addrI, addr) + } + + if got, want := addr.String(), test.wantString; got != want { + t.Errorf("wrong String() result\ngot: %s\nwant: %s", got, want) + } + if got, want := addr.ForDisplay(), test.wantForDisplay; got != want { + t.Errorf("wrong ForDisplay() result\ngot: %s\nwant: %s", got, want) + } + if got, want := addr.PackageAddr.ForRegistryProtocol(), test.wantForProtocol; got != want { + t.Errorf("wrong ForRegistryProtocol() result\ngot: %s\nwant: %s", got, want) + } + }) + } +} diff --git a/internal/terraform/addrs/move_endpoint.go b/internal/terraform/addrs/move_endpoint.go new file mode 100644 index 00000000..ce9b4ac3 --- /dev/null +++ b/internal/terraform/addrs/move_endpoint.go @@ -0,0 +1,296 @@ +package addrs + +import ( + "fmt" + + "github.com/camptocamp/terraboard/internal/terraform/tfdiags" + "github.com/hashicorp/hcl/v2" +) + +// MoveEndpoint is to AbsMoveable and ConfigMoveable what Target is to +// Targetable: a wrapping struct that captures the result of decoding an HCL +// traversal representing a relative path from the current module to +// a moveable object. 
+// +// Its name reflects that its primary purpose is for the "from" and "to" +// addresses in a "moved" statement in the configuration, but it's also +// valid to use MoveEndpoint for other similar mechanisms that give +// Terraform hints about historical configuration changes that might +// prompt creating a different plan than Terraform would by default. +// +// To obtain a full address from a MoveEndpoint you must use +// either the package function UnifyMoveEndpoints (to get an AbsMoveable) or +// the method ConfigMoveable (to get a ConfigMoveable). +type MoveEndpoint struct { + // SourceRange is the location of the physical endpoint address + // in configuration, if this MoveEndpoint was decoded from a + // configuration expresson. + SourceRange tfdiags.SourceRange + + // Internally we (ab)use AbsMoveable as the representation of our + // relative address, even though everywhere else in Terraform + // AbsMoveable always represents a fully-absolute address. + // In practice, due to the implementation of ParseMoveEndpoint, + // this is always either a ModuleInstance or an AbsResourceInstance, + // and we only consider the possibility of interpreting it as + // a AbsModuleCall or an AbsResource in UnifyMoveEndpoints. + // This is intentionally unexported to encapsulate this unusual + // meaning of AbsMoveable. + relSubject AbsMoveable +} + +func (e *MoveEndpoint) ObjectKind() MoveEndpointKind { + return absMoveableEndpointKind(e.relSubject) +} + +func (e *MoveEndpoint) String() string { + // Our internal pseudo-AbsMoveable representing the relative + // address (either ModuleInstance or AbsResourceInstance) is + // a good enough proxy for the relative move endpoint address + // serialization. 
+ return e.relSubject.String() +} + +func (e *MoveEndpoint) Equal(other *MoveEndpoint) bool { + switch { + case (e == nil) != (other == nil): + return false + case e == nil: + return true + default: + // Since we only use ModuleInstance and AbsResourceInstance in our + // string representation, we have no ambiguity between address types + // and can safely just compare the string representations to + // compare the relSubject values. + return e.String() == other.String() && e.SourceRange == other.SourceRange + } +} + +// MightUnifyWith returns true if it is possible that a later call to +// UnifyMoveEndpoints might succeed if given the reciever and the other +// given endpoint. +// +// This is intended for early static validation of obviously-wrong situations, +// although there are still various semantic errors that this cannot catch. +func (e *MoveEndpoint) MightUnifyWith(other *MoveEndpoint) bool { + // For our purposes here we'll just do a unify without a base module + // address, because the rules for whether unify can succeed depend + // only on the relative part of the addresses, not on which module + // they were declared in. + from, to := UnifyMoveEndpoints(RootModule, e, other) + return from != nil && to != nil +} + +// ConfigMovable transforms the reciever into a ConfigMovable by resolving it +// relative to the given base module, which should be the module where +// the MoveEndpoint expression was found. +// +// The result is useful for finding the target object in the configuration, +// but it's not sufficient for fully interpreting a move statement because +// it lacks the specific module and resource instance keys. +func (e *MoveEndpoint) ConfigMoveable(baseModule Module) ConfigMoveable { + addr := e.relSubject + switch addr := addr.(type) { + case ModuleInstance: + ret := make(Module, 0, len(baseModule)+len(addr)) + ret = append(ret, baseModule...) + ret = append(ret, addr.Module()...) 
+ return ret + case AbsResourceInstance: + moduleAddr := make(Module, 0, len(baseModule)+len(addr.Module)) + moduleAddr = append(moduleAddr, baseModule...) + moduleAddr = append(moduleAddr, addr.Module.Module()...) + return ConfigResource{ + Module: moduleAddr, + Resource: addr.Resource.Resource, + } + default: + // The above should be exhaustive for all of the types + // that ParseMoveEndpoint produces as our intermediate + // address representation. + panic(fmt.Sprintf("unsupported address type %T", addr)) + } + +} + +// ParseMoveEndpoint attempts to interpret the given traversal as a +// "move endpoint" address, which is a relative path from the module containing +// the traversal to a movable object in either the same module or in some +// child module. +// +// This deals only with the syntactic element of a move endpoint expression +// in configuration. Before the result will be useful you'll need to combine +// it with the address of the module where it was declared in order to get +// an absolute address relative to the root module. +func ParseMoveEndpoint(traversal hcl.Traversal) (*MoveEndpoint, tfdiags.Diagnostics) { + path, remain, diags := parseModuleInstancePrefix(traversal) + if diags.HasErrors() { + return nil, diags + } + + rng := tfdiags.SourceRangeFromHCL(traversal.SourceRange()) + + if len(remain) == 0 { + return &MoveEndpoint{ + relSubject: path, + SourceRange: rng, + }, diags + } + + riAddr, moreDiags := parseResourceInstanceUnderModule(path, remain) + diags = diags.Append(moreDiags) + if diags.HasErrors() { + return nil, diags + } + + return &MoveEndpoint{ + relSubject: riAddr, + SourceRange: rng, + }, diags +} + +// UnifyMoveEndpoints takes a pair of MoveEndpoint objects representing the +// "from" and "to" addresses in a moved block, and returns a pair of +// MoveEndpointInModule addresses guaranteed to be of the same dynamic type +// that represent what the two MoveEndpoint addresses refer to. 
+// +// moduleAddr must be the address of the module where the move was declared. +// +// This function deals both with the conversion from relative to absolute +// addresses and with resolving the ambiguity between no-key instance +// addresses and whole-object addresses, returning the least specific +// address type possible. +// +// Not all combinations of addresses are unifyable: the two addresses must +// either both include resources or both just be modules. If the two +// given addresses are incompatible then UnifyMoveEndpoints returns (nil, nil), +// in which case the caller should typically report an error to the user +// stating the unification constraints. +func UnifyMoveEndpoints(moduleAddr Module, relFrom, relTo *MoveEndpoint) (modFrom, modTo *MoveEndpointInModule) { + + // First we'll make a decision about which address type we're + // ultimately trying to unify to. For our internal purposes + // here we're going to borrow TargetableAddrType just as a + // convenient way to talk about our address types, even though + // targetable address types are not 100% aligned with moveable + // address types. + fromType := relFrom.internalAddrType() + toType := relTo.internalAddrType() + var wantType TargetableAddrType + + // Our goal here is to choose the whole-resource or whole-module-call + // addresses if both agree on it, but to use specific instance addresses + // otherwise. This is a somewhat-arbitrary way to resolve syntactic + // ambiguity between the two situations which allows both for renaming + // whole resources and for switching from a single-instance object to + // a multi-instance object. 
+ switch { + case fromType == AbsResourceInstanceAddrType || toType == AbsResourceInstanceAddrType: + wantType = AbsResourceInstanceAddrType + case fromType == AbsResourceAddrType || toType == AbsResourceAddrType: + wantType = AbsResourceAddrType + case fromType == ModuleInstanceAddrType || toType == ModuleInstanceAddrType: + wantType = ModuleInstanceAddrType + case fromType == ModuleAddrType || toType == ModuleAddrType: + // NOTE: We're fudging a little here and using + // ModuleAddrType to represent AbsModuleCall rather + // than Module. + wantType = ModuleAddrType + default: + panic("unhandled move address types") + } + + modFrom = relFrom.prepareMoveEndpointInModule(moduleAddr, wantType) + modTo = relTo.prepareMoveEndpointInModule(moduleAddr, wantType) + if modFrom == nil || modTo == nil { + // if either of them failed then they both failed, to make the + // caller's life a little easier. + return nil, nil + } + return modFrom, modTo +} + +func (e *MoveEndpoint) prepareMoveEndpointInModule(moduleAddr Module, wantType TargetableAddrType) *MoveEndpointInModule { + // relAddr can only be either AbsResourceInstance or ModuleInstance, the + // internal intermediate representation produced by ParseMoveEndpoint. + relAddr := e.relSubject + + switch relAddr := relAddr.(type) { + case ModuleInstance: + switch wantType { + case ModuleInstanceAddrType: + // Since our internal representation is already a module instance, + // we can just rewrap this one. + return &MoveEndpointInModule{ + SourceRange: e.SourceRange, + module: moduleAddr, + relSubject: relAddr, + } + case ModuleAddrType: + // NOTE: We're fudging a little here and using + // ModuleAddrType to represent AbsModuleCall rather + // than Module. 
+ callerAddr, callAddr := relAddr.Call() + absCallAddr := AbsModuleCall{ + Module: callerAddr, + Call: callAddr, + } + return &MoveEndpointInModule{ + SourceRange: e.SourceRange, + module: moduleAddr, + relSubject: absCallAddr, + } + default: + return nil // can't make any other types from a ModuleInstance + } + case AbsResourceInstance: + switch wantType { + case AbsResourceInstanceAddrType: + return &MoveEndpointInModule{ + SourceRange: e.SourceRange, + module: moduleAddr, + relSubject: relAddr, + } + case AbsResourceAddrType: + return &MoveEndpointInModule{ + SourceRange: e.SourceRange, + module: moduleAddr, + relSubject: relAddr.ContainingResource(), + } + default: + return nil // can't make any other types from an AbsResourceInstance + } + default: + panic(fmt.Sprintf("unhandled address type %T", relAddr)) + } +} + +// internalAddrType helps facilitate our slight abuse of TargetableAddrType +// as a way to talk about our different possible result address types in +// UnifyMoveEndpoints. +// +// It's not really correct to use TargetableAddrType in this way, because +// it's for Targetable rather than for AbsMoveable, but as long as the two +// remain aligned enough it saves introducing yet another enumeration with +// similar members that would be for internal use only anyway. +func (e *MoveEndpoint) internalAddrType() TargetableAddrType { + switch addr := e.relSubject.(type) { + case ModuleInstance: + if !addr.IsRoot() && addr[len(addr)-1].InstanceKey == NoKey { + // NOTE: We're fudging a little here and using + // ModuleAddrType to represent AbsModuleCall rather + // than Module. + return ModuleAddrType + } + return ModuleInstanceAddrType + case AbsResourceInstance: + if addr.Resource.Key == NoKey { + return AbsResourceAddrType + } + return AbsResourceInstanceAddrType + default: + // The above should cover all of the address types produced + // by ParseMoveEndpoint. 
+ panic(fmt.Sprintf("unsupported address type %T", addr)) + } +} diff --git a/internal/terraform/addrs/move_endpoint_kind.go b/internal/terraform/addrs/move_endpoint_kind.go new file mode 100644 index 00000000..cd8adab8 --- /dev/null +++ b/internal/terraform/addrs/move_endpoint_kind.go @@ -0,0 +1,33 @@ +package addrs + +import "fmt" + +// MoveEndpointKind represents the different kinds of object that a movable +// address can refer to. +type MoveEndpointKind rune + +//go:generate go run golang.org/x/tools/cmd/stringer -type MoveEndpointKind + +const ( + // MoveEndpointModule indicates that a move endpoint either refers to + // an individual module instance or to all instances of a particular + // module call. + MoveEndpointModule MoveEndpointKind = 'M' + + // MoveEndpointResource indicates that a move endpoint either refers to + // an individual resource instance or to all instances of a particular + // resource. + MoveEndpointResource MoveEndpointKind = 'R' +) + +func absMoveableEndpointKind(addr AbsMoveable) MoveEndpointKind { + switch addr := addr.(type) { + case ModuleInstance, AbsModuleCall: + return MoveEndpointModule + case AbsResourceInstance, AbsResource: + return MoveEndpointResource + default: + // The above should be exhaustive for all AbsMoveable types. + panic(fmt.Sprintf("unsupported address type %T", addr)) + } +} diff --git a/internal/terraform/addrs/move_endpoint_module.go b/internal/terraform/addrs/move_endpoint_module.go new file mode 100644 index 00000000..fb21894b --- /dev/null +++ b/internal/terraform/addrs/move_endpoint_module.go @@ -0,0 +1,740 @@ +package addrs + +import ( + "fmt" + "reflect" + "strings" + + "github.com/camptocamp/terraboard/internal/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// anyKeyImpl is the InstanceKey representation indicating a wildcard, which +// matches all possible keys. 
This is only used internally for matching +// combinations of address types, where only portions of the path contain key +// information. +type anyKeyImpl rune + +func (k anyKeyImpl) instanceKeySigil() { +} + +func (k anyKeyImpl) String() string { + return fmt.Sprintf("[%s]", string(k)) +} + +func (k anyKeyImpl) Value() cty.Value { + return cty.StringVal(string(k)) +} + +// anyKey is the only valid value of anyKeyImpl +var anyKey = anyKeyImpl('*') + +// MoveEndpointInModule annotates a MoveEndpoint with the address of the +// module where it was declared, which is the form we use for resolving +// whether move statements chain from or are nested within other move +// statements. +type MoveEndpointInModule struct { + // SourceRange is the location of the physical endpoint address + // in configuration, if this MoveEndpoint was decoded from a + // configuration expresson. + SourceRange tfdiags.SourceRange + + // The internals are unexported here because, as with MoveEndpoint, + // we're somewhat abusing AbsMoveable here to represent an address + // relative to the module, rather than as an absolute address. + // Conceptually, the following two fields represent a matching pattern + // for AbsMoveables where the elements of "module" behave as + // ModuleInstanceStep values with a wildcard instance key, because + // a moved block in a module affects all instances of that module. + // Unlike MoveEndpoint, relSubject in this case can be any of the + // address types that implement AbsMoveable. + module Module + relSubject AbsMoveable +} + +// ImpliedMoveStatementEndpoint is a special constructor for MoveEndpointInModule +// which is suitable only for constructing "implied" move statements, which +// means that we inferred the statement automatically rather than building it +// from an explicit block in the configuration. 
+// +// Implied move endpoints, just as for the statements they are embedded in, +// have somewhat-related-but-imprecise source ranges, typically referring to +// some general configuration construct that implied the statement, because +// by definition there is no explicit move endpoint expression in this case. +func ImpliedMoveStatementEndpoint(addr AbsResourceInstance, rng tfdiags.SourceRange) *MoveEndpointInModule { + // implied move endpoints always belong to the root module, because each + // one refers to a single resource instance inside a specific module + // instance, rather than all instances of the module where the resource + // was declared. + return &MoveEndpointInModule{ + SourceRange: rng, + module: RootModule, + relSubject: addr, + } +} + +func (e *MoveEndpointInModule) ObjectKind() MoveEndpointKind { + return absMoveableEndpointKind(e.relSubject) +} + +// String produces a string representation of the object matching pattern +// represented by the receiver. +// +// Since there is no direct syntax for representing such an object matching +// pattern, this function uses a splat-operator-like representation to stand +// in for the wildcard instance keys. +func (e *MoveEndpointInModule) String() string { + if e == nil { + return "" + } + var buf strings.Builder + for _, name := range e.module { + buf.WriteString("module.") + buf.WriteString(name) + buf.WriteString("[*].") + } + buf.WriteString(e.relSubject.String()) + + // For consistency we'll also use the splat-like wildcard syntax to + // represent the final step being either a resource or module call + // rather than an instance, so we can more easily distinguish the two + // in the string representation. + switch e.relSubject.(type) { + case AbsModuleCall, AbsResource: + buf.WriteString("[*]") + } + + return buf.String() +} + +// Equal returns true if the receiver represents the same matching pattern +// as the other given endpoint, ignoring the source location information.
+// +// This is not an optimized function and is here primarily to help with +// writing concise assertions in test code. +func (e *MoveEndpointInModule) Equal(other *MoveEndpointInModule) bool { + if (e == nil) != (other == nil) { + return false + } + if !e.module.Equal(other.module) { + return false + } + // This assumes that all of our possible "movables" are trivially + // comparable with reflect, which is true for all of them at the time + // of writing. + return reflect.DeepEqual(e.relSubject, other.relSubject) +} + +// Module returns the address of the module where the receiving address was +// declared. +func (e *MoveEndpointInModule) Module() Module { + return e.module +} + +// InModuleInstance returns an AbsMoveable address which concatenates the +// given module instance address with the receiver's relative object selection +// to produce one example of an instance that might be affected by this +// move statement. +// +// The result is meaningful only if the given module instance is an instance +// of the same module returned by the method Module. InModuleInstance doesn't +// fully verify that (aside from some cheap/easy checks), but it will produce +// meaningless garbage if not. +func (e *MoveEndpointInModule) InModuleInstance(modInst ModuleInstance) AbsMoveable { + if len(modInst) != len(e.module) { + // We don't check all of the steps to make sure that their names match, + // because it would be expensive to do that repeatedly for every + // instance of a module, but if the lengths don't match then that's + // _obviously_ wrong. + panic("given instance address does not match module address") + } + switch relSubject := e.relSubject.(type) { + case ModuleInstance: + ret := make(ModuleInstance, 0, len(modInst)+len(relSubject)) + ret = append(ret, modInst...) + ret = append(ret, relSubject...) + return ret + case AbsModuleCall: + retModAddr := make(ModuleInstance, 0, len(modInst)+len(relSubject.Module)) + retModAddr = append(retModAddr, modInst...) 
+ retModAddr = append(retModAddr, relSubject.Module...) + return relSubject.Call.Absolute(retModAddr) + case AbsResourceInstance: + retModAddr := make(ModuleInstance, 0, len(modInst)+len(relSubject.Module)) + retModAddr = append(retModAddr, modInst...) + retModAddr = append(retModAddr, relSubject.Module...) + return relSubject.Resource.Absolute(retModAddr) + case AbsResource: + retModAddr := make(ModuleInstance, 0, len(modInst)+len(relSubject.Module)) + retModAddr = append(retModAddr, modInst...) + retModAddr = append(retModAddr, relSubject.Module...) + return relSubject.Resource.Absolute(retModAddr) + default: + panic(fmt.Sprintf("unexpected move subject type %T", relSubject)) + } +} + +// ModuleCallTraversals returns both the address of the module where the +// receiver was declared and any other module calls it traverses through +// while selecting a particular object to move. +// +// This is a rather special-purpose function here mainly to support our +// validation rule that a module can only traverse down into child modules +// that belong to the same module package. +func (e *MoveEndpointInModule) ModuleCallTraversals() (Module, []ModuleCall) { + // We're returning []ModuleCall rather than Module here to make it clearer + // that this is a relative sequence of calls rather than an absolute + // module path. + + var steps []ModuleInstanceStep + switch relSubject := e.relSubject.(type) { + case ModuleInstance: + // We want all of the steps except the last one here, because the + // last one is always selecting something declared in the same module + // even though our address structure doesn't capture that. 
+ steps = []ModuleInstanceStep(relSubject[:len(relSubject)-1]) + case AbsModuleCall: + steps = []ModuleInstanceStep(relSubject.Module) + case AbsResourceInstance: + steps = []ModuleInstanceStep(relSubject.Module) + case AbsResource: + steps = []ModuleInstanceStep(relSubject.Module) + default: + panic(fmt.Sprintf("unexpected move subject type %T", relSubject)) + } + + ret := make([]ModuleCall, len(steps)) + for i, step := range steps { + ret[i] = ModuleCall{Name: step.Name} + } + return e.module, ret +} + +// synthModuleInstance constructs a module instance out of the module path and +// any module portion of the relSubject, substituting Module and Call segments +// with ModuleInstanceStep using the anyKey value. +// This is only used internally for comparison of these complete paths, but +// does not represent how the individual parts are handled elsewhere in the +// code. +func (e *MoveEndpointInModule) synthModuleInstance() ModuleInstance { + var inst ModuleInstance + + for _, mod := range e.module { + inst = append(inst, ModuleInstanceStep{Name: mod, InstanceKey: anyKey}) + } + + switch sub := e.relSubject.(type) { + case ModuleInstance: + inst = append(inst, sub...) + case AbsModuleCall: + inst = append(inst, sub.Module...) + inst = append(inst, ModuleInstanceStep{Name: sub.Call.Name, InstanceKey: anyKey}) + case AbsResource: + inst = append(inst, sub.Module...) + case AbsResourceInstance: + inst = append(inst, sub.Module...) + default: + panic(fmt.Sprintf("unhandled relative address type %T", sub)) + } + + return inst +} + +// SelectsModule returns true if the reciever directly selects either +// the given module or a resource nested directly inside that module. +// +// This is a good function to use to decide which modules in a state +// to consider when processing a particular move statement. 
For a +// module move the given module itself is what will move, while a +// resource move indicates that we should search each of the resources in +// the given module to see if they match. +func (e *MoveEndpointInModule) SelectsModule(addr ModuleInstance) bool { + synthInst := e.synthModuleInstance() + + // In order to match the given module instance, our combined path must be + // equal in length. + if len(synthInst) != len(addr) { + return false + } + + for i, step := range synthInst { + switch step.InstanceKey { + case anyKey: + // we can match any key as long as the name matches + if step.Name != addr[i].Name { + return false + } + default: + if step != addr[i] { + return false + } + } + } + return true +} + +// SelectsResource returns true if the receiver directly selects either +// the given resource or one of its instances. +func (e *MoveEndpointInModule) SelectsResource(addr AbsResource) bool { + // Only a subset of subject types can possibly select a resource, so + // we'll take care of those quickly before we do anything more expensive. + switch e.relSubject.(type) { + case AbsResource, AbsResourceInstance: + // okay + default: + return false // can't possibly match + } + + if !e.SelectsModule(addr.Module) { + return false + } + + // If we get here then we know the module part matches, so we only need + // to worry about the relative resource part. + switch relSubject := e.relSubject.(type) { + case AbsResource: + return addr.Resource.Equal(relSubject.Resource) + case AbsResourceInstance: + // We intentionally ignore the instance key, because we consider + // instances to be part of the resource they belong to. + return addr.Resource.Equal(relSubject.Resource.Resource) + default: + // We should've filtered out all other types above + panic(fmt.Sprintf("unsupported relSubject type %T", relSubject)) + } +} + +// moduleInstanceCanMatch indicates that modA can match modB taking into +// account steps with an anyKey InstanceKey as wildcards. 
The comparison of +// wildcard steps is done symmetrically, because varying portions of either +// instance's path could have been derived from configuration vs evaluation. +// The length of modA must be equal or shorter than the length of modB. +func moduleInstanceCanMatch(modA, modB ModuleInstance) bool { + for i, step := range modA { + switch { + case step.InstanceKey == anyKey || modB[i].InstanceKey == anyKey: + // we can match any key as long as the names match + if step.Name != modB[i].Name { + return false + } + default: + if step != modB[i] { + return false + } + } + } + return true +} + +// CanChainFrom returns true if the reciever describes an address that could +// potentially select an object that the other given address could select. +// +// In other words, this decides whether the move chaining rule applies, if +// the reciever is the "to" from one statement and the other given address +// is the "from" of another statement. +func (e *MoveEndpointInModule) CanChainFrom(other *MoveEndpointInModule) bool { + eMod := e.synthModuleInstance() + oMod := other.synthModuleInstance() + + // if the complete paths are different lengths, these cannot refer to the + // same value. + if len(eMod) != len(oMod) { + return false + } + if !moduleInstanceCanMatch(oMod, eMod) { + return false + } + + eSub := e.relSubject + oSub := other.relSubject + + switch oSub := oSub.(type) { + case AbsModuleCall, ModuleInstance: + switch eSub.(type) { + case AbsModuleCall, ModuleInstance: + // we already know the complete module path including any final + // module call name is equal. 
+ return true + } + + case AbsResource: + switch eSub := eSub.(type) { + case AbsResource: + return eSub.Resource.Equal(oSub.Resource) + } + + case AbsResourceInstance: + switch eSub := eSub.(type) { + case AbsResourceInstance: + return eSub.Resource.Equal(oSub.Resource) + } + } + + return false +} + +// NestedWithin returns true if the receiver describes an address that is +// contained within one of the objects that the given other address could +// select. +func (e *MoveEndpointInModule) NestedWithin(other *MoveEndpointInModule) bool { + eMod := e.synthModuleInstance() + oMod := other.synthModuleInstance() + + // In order to be nested within the given endpoint, the module path must be + // shorter or equal. + if len(oMod) > len(eMod) { + return false + } + + if !moduleInstanceCanMatch(oMod, eMod) { + return false + } + + eSub := e.relSubject + oSub := other.relSubject + + switch oSub := oSub.(type) { + case AbsModuleCall: + switch eSub.(type) { + case AbsModuleCall: + // we know the other endpoint selects our module, but if we are + // also a module call our path must be longer to be nested. + return len(eMod) > len(oMod) + } + + return true + + case ModuleInstance: + switch eSub.(type) { + case ModuleInstance, AbsModuleCall: + // a nested module must have a longer path + return len(eMod) > len(oMod) + } + + return true + + case AbsResource: + if len(eMod) != len(oMod) { + // these resources are from different modules + return false + } + + // A resource can only contain a resource instance. + switch eSub := eSub.(type) { + case AbsResourceInstance: + return eSub.Resource.Resource.Equal(oSub.Resource) + } + } + + return false +} + +// matchModuleInstancePrefix is an internal helper to decide whether the given +// module instance address refers to either the module where the move endpoint +// was declared or some descendent of that module. 
+// +// If so, it will split the given address into two parts: the "prefix" part +// which corresponds with the module where the statement was declared, and +// the "relative" part which is the remainder that the relSubject of the +// statement might match against. +// +// The second return value is another example of our light abuse of +// ModuleInstance to represent _relative_ module references rather than +// absolute: it's a module instance address relative to the same return value. +// Because the exported idea of ModuleInstance represents only _absolute_ +// module instance addresses, we mustn't expose that value through any exported +// API. +func (e *MoveEndpointInModule) matchModuleInstancePrefix(instAddr ModuleInstance) (ModuleInstance, ModuleInstance, bool) { + if len(e.module) > len(instAddr) { + return nil, nil, false // too short to possibly match + } + for i := range e.module { + if e.module[i] != instAddr[i].Name { + return nil, nil, false + } + } + // If we get here then we have a match, so we'll slice up the input + // to produce the prefix and match segments. + return instAddr[:len(e.module)], instAddr[len(e.module):], true +} + +// MoveDestination considers an address representing a module +// instance in the context of source and destination move endpoints and then, +// if the module address matches the from endpoint, returns the corresponding +// new module address that the object should move to. +// +// MoveDestination will return false in its second return value if the receiver +// doesn't match fromMatch, indicating that the given move statement doesn't +// apply to this object. +// +// Both of the given endpoints must be from the same move statement and thus +// must have matching object types. If not, MoveDestination will panic.
+func (m ModuleInstance) MoveDestination(fromMatch, toMatch *MoveEndpointInModule) (ModuleInstance, bool) { + // NOTE: This implementation assumes the invariant that fromMatch and + // toMatch both belong to the same configuration statement, and thus they + // will both have the same address type and the same declaration module. + + // The root module instance is not itself moveable. + if m.IsRoot() { + return nil, false + } + + // The two endpoints must either be module call or module instance + // addresses, or else this statement can never match. + if fromMatch.ObjectKind() != MoveEndpointModule { + return nil, false + } + + // The rest of our work will be against the part of the reciever that's + // relative to the declaration module. mRel is a weird abuse of + // ModuleInstance that represents a relative module address, similar to + // what we do for MoveEndpointInModule.relSubject. + mPrefix, mRel, match := fromMatch.matchModuleInstancePrefix(m) + if !match { + return nil, false + } + + // Our next goal is to split mRel into two parts: the match (if any) and + // the suffix. Our result will then replace the match with the replacement + // in toMatch while preserving the prefix and suffix. + var mSuffix, mNewMatch ModuleInstance + + switch relSubject := fromMatch.relSubject.(type) { + case ModuleInstance: + if len(relSubject) > len(mRel) { + return nil, false // too short to possibly match + } + for i := range relSubject { + if relSubject[i] != mRel[i] { + return nil, false // this step doesn't match + } + } + // If we get to here then we've found a match. Since the statement + // addresses are already themselves ModuleInstance fragments we can + // just slice out the relevant parts. + mNewMatch = toMatch.relSubject.(ModuleInstance) + mSuffix = mRel[len(relSubject):] + case AbsModuleCall: + // The module instance part of relSubject must be a prefix of + // mRel, and mRel must be at least one step longer to account for + // the call step itself. 
+ if len(relSubject.Module) > len(mRel)-1 { + return nil, false + } + for i := range relSubject.Module { + if relSubject.Module[i] != mRel[i] { + return nil, false // this step doesn't match + } + } + // The call name must also match the next step of mRel, after + // the relSubject.Module prefix. + callStep := mRel[len(relSubject.Module)] + if callStep.Name != relSubject.Call.Name { + return nil, false + } + // If we get to here then we've found a match. We need to construct + // a new mNewMatch that's an instance of the "new" relSubject with + // the same key as our call. + mNewMatch = toMatch.relSubject.(AbsModuleCall).Instance(callStep.InstanceKey) + mSuffix = mRel[len(relSubject.Module)+1:] + default: + panic("invalid address type for module-kind move endpoint") + } + + ret := make(ModuleInstance, 0, len(mPrefix)+len(mNewMatch)+len(mSuffix)) + ret = append(ret, mPrefix...) + ret = append(ret, mNewMatch...) + ret = append(ret, mSuffix...) + return ret, true +} + +// MoveDestination considers a an address representing a resource +// in the context of source and destination move endpoints and then, +// if the resource address matches the from endpoint, returns the corresponding +// new resource address that the object should move to. +// +// MoveDestination will return false in its second return value if the receiver +// doesn't match fromMatch, indicating that the given move statement doesn't +// apply to this object. +// +// Both of the given endpoints must be from the same move statement and thus +// must have matching object types. If not, MoveDestination will panic. +func (r AbsResource) MoveDestination(fromMatch, toMatch *MoveEndpointInModule) (AbsResource, bool) { + switch fromMatch.ObjectKind() { + case MoveEndpointModule: + // If we've moving a module then any resource inside that module + // moves too. 
+ fromMod := r.Module + toMod, match := fromMod.MoveDestination(fromMatch, toMatch) + if !match { + return AbsResource{}, false + } + return r.Resource.Absolute(toMod), true + + case MoveEndpointResource: + fromRelSubject, ok := fromMatch.relSubject.(AbsResource) + if !ok { + // The only other possible type for a resource move is + // AbsResourceInstance, and that can never match an AbsResource. + return AbsResource{}, false + } + + // fromMatch can only possibly match the reciever if the resource + // portions are identical, regardless of the module paths. + if fromRelSubject.Resource != r.Resource { + return AbsResource{}, false + } + + // The module path portion of relSubject must have a prefix that + // matches the module where our endpoints were declared. + mPrefix, mRel, match := fromMatch.matchModuleInstancePrefix(r.Module) + if !match { + return AbsResource{}, false + } + + // The remaining steps of the module path must _exactly_ match + // the relative module path in the "fromMatch" address. + if len(mRel) != len(fromRelSubject.Module) { + return AbsResource{}, false // can't match if lengths are different + } + for i := range mRel { + if mRel[i] != fromRelSubject.Module[i] { + return AbsResource{}, false // all of the steps must match + } + } + + // If we got here then we have a match, and so our result is the + // module instance where the statement was declared (mPrefix) followed + // by the "to" relative address in toMatch. + toRelSubject := toMatch.relSubject.(AbsResource) + var mNew ModuleInstance + if len(mPrefix) > 0 || len(toRelSubject.Module) > 0 { + mNew = make(ModuleInstance, 0, len(mPrefix)+len(toRelSubject.Module)) + mNew = append(mNew, mPrefix...) + mNew = append(mNew, toRelSubject.Module...) 
+ } + ret := toRelSubject.Resource.Absolute(mNew) + return ret, true + + default: + panic("unexpected object kind") + } +} + +// MoveDestination considers an address representing a resource +// instance in the context of source and destination move endpoints and then, +// if the instance address matches the from endpoint, returns the corresponding +// new instance address that the object should move to. +// +// MoveDestination will return false in its second return value if the receiver +// doesn't match fromMatch, indicating that the given move statement doesn't +// apply to this object. +// +// Both of the given endpoints must be from the same move statement and thus +// must have matching object types. If not, MoveDestination will panic. +func (r AbsResourceInstance) MoveDestination(fromMatch, toMatch *MoveEndpointInModule) (AbsResourceInstance, bool) { + switch fromMatch.ObjectKind() { + case MoveEndpointModule: + // If we're moving a module then any resource inside that module + // moves too. + fromMod := r.Module + toMod, match := fromMod.MoveDestination(fromMatch, toMatch) + if !match { + return AbsResourceInstance{}, false + } + return r.Resource.Absolute(toMod), true + + case MoveEndpointResource: + switch fromMatch.relSubject.(type) { + case AbsResource: + oldResource := r.ContainingResource() + newResource, match := oldResource.MoveDestination(fromMatch, toMatch) + if !match { + return AbsResourceInstance{}, false + } + return newResource.Instance(r.Resource.Key), true + case AbsResourceInstance: + fromRelSubject, ok := fromMatch.relSubject.(AbsResourceInstance) + if !ok { + // The only other possible type for a resource move is + // AbsResourceInstance, and that can never match an AbsResource. + return AbsResourceInstance{}, false + } + + // fromMatch can only possibly match the receiver if the resource + // portions are identical, regardless of the module paths.
+ if fromRelSubject.Resource != r.Resource { + return AbsResourceInstance{}, false + } + + // The module path portion of relSubject must have a prefix that + // matches the module where our endpoints were declared. + mPrefix, mRel, match := fromMatch.matchModuleInstancePrefix(r.Module) + if !match { + return AbsResourceInstance{}, false + } + + // The remaining steps of the module path must _exactly_ match + // the relative module path in the "fromMatch" address. + if len(mRel) != len(fromRelSubject.Module) { + return AbsResourceInstance{}, false // can't match if lengths are different + } + for i := range mRel { + if mRel[i] != fromRelSubject.Module[i] { + return AbsResourceInstance{}, false // all of the steps must match + } + } + + // If we got here then we have a match, and so our result is the + // module instance where the statement was declared (mPrefix) followed + // by the "to" relative address in toMatch. + toRelSubject := toMatch.relSubject.(AbsResourceInstance) + var mNew ModuleInstance + if len(mPrefix) > 0 || len(toRelSubject.Module) > 0 { + mNew = make(ModuleInstance, 0, len(mPrefix)+len(toRelSubject.Module)) + mNew = append(mNew, mPrefix...) + mNew = append(mNew, toRelSubject.Module...) + } + ret := toRelSubject.Resource.Absolute(mNew) + return ret, true + default: + panic("invalid address type for resource-kind move endpoint") + } + default: + panic("unexpected object kind") + } +} + +// IsModuleReIndex takes the From and To endpoints from a single move +// statement, and returns true if the only changes are to module indexes, and +// all non-absolute paths remain the same. +func (from *MoveEndpointInModule) IsModuleReIndex(to *MoveEndpointInModule) bool { + // The statements must originate from the same module. 
+ if !from.module.Equal(to.module) { + panic("cannot compare move expressions from different modules") + } + + switch f := from.relSubject.(type) { + case AbsModuleCall: + switch t := to.relSubject.(type) { + case ModuleInstance: + // Generate a synthetic module to represent the full address of + // the module call. We're not actually comparing indexes, so the + // instance doesn't matter. + callAddr := f.Instance(NoKey).Module() + return callAddr.Equal(t.Module()) + } + + case ModuleInstance: + switch t := to.relSubject.(type) { + case AbsModuleCall: + callAddr := t.Instance(NoKey).Module() + return callAddr.Equal(f.Module()) + + case ModuleInstance: + return t.Module().Equal(f.Module()) + } + } + + return false +} diff --git a/internal/terraform/addrs/move_endpoint_module_test.go b/internal/terraform/addrs/move_endpoint_module_test.go new file mode 100644 index 00000000..ce6af283 --- /dev/null +++ b/internal/terraform/addrs/move_endpoint_module_test.go @@ -0,0 +1,1745 @@ +package addrs + +import ( + "fmt" + "strings" + "testing" + + "github.com/camptocamp/terraboard/internal/terraform/tfdiags" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" +) + +func TestModuleInstanceMoveDestination(t *testing.T) { + tests := []struct { + DeclModule string + StmtFrom, StmtTo string + Receiver string + WantMatch bool + WantResult string + }{ + { + ``, + `module.foo`, + `module.bar`, + `module.foo`, + true, + `module.bar`, + }, + { + ``, + `module.foo`, + `module.bar`, + `module.foo[1]`, + true, + `module.bar[1]`, + }, + { + ``, + `module.foo`, + `module.bar`, + `module.foo["a"]`, + true, + `module.bar["a"]`, + }, + { + ``, + `module.foo`, + `module.bar.module.foo`, + `module.foo`, + true, + `module.bar.module.foo`, + }, + { + ``, + `module.foo.module.bar`, + `module.bar`, + `module.foo.module.bar`, + true, + `module.bar`, + }, + { + ``, + `module.foo[1]`, + `module.foo[2]`, + `module.foo[1]`, + true, + `module.foo[2]`, + }, + { + ``, + `module.foo[1]`, 
+ `module.foo`, + `module.foo[1]`, + true, + `module.foo`, + }, + { + ``, + `module.foo`, + `module.foo[1]`, + `module.foo`, + true, + `module.foo[1]`, + }, + { + ``, + `module.foo`, + `module.foo[1]`, + `module.foo.module.bar`, + true, + `module.foo[1].module.bar`, + }, + { + ``, + `module.foo`, + `module.foo[1]`, + `module.foo.module.bar[0]`, + true, + `module.foo[1].module.bar[0]`, + }, + { + ``, + `module.foo`, + `module.bar.module.foo`, + `module.foo[0]`, + true, + `module.bar.module.foo[0]`, + }, + { + ``, + `module.foo.module.bar`, + `module.bar`, + `module.foo.module.bar[0]`, + true, + `module.bar[0]`, + }, + { + `foo`, + `module.bar`, + `module.baz`, + `module.foo.module.bar`, + true, + `module.foo.module.baz`, + }, + { + `foo`, + `module.bar`, + `module.baz`, + `module.foo[1].module.bar`, + true, + `module.foo[1].module.baz`, + }, + { + `foo`, + `module.bar`, + `module.bar[1]`, + `module.foo[1].module.bar`, + true, + `module.foo[1].module.bar[1]`, + }, + { + ``, + `module.foo[1]`, + `module.foo[2]`, + `module.foo`, + false, // the receiver has a non-matching instance key (NoKey) + ``, + }, + { + ``, + `module.foo[1]`, + `module.foo[2]`, + `module.foo[2]`, + false, // the receiver is already the "to" address + ``, + }, + { + ``, + `module.foo`, + `module.bar`, + ``, + false, // the root module can never be moved + ``, + }, + { + `foo`, + `module.bar`, + `module.bar[1]`, + `module.boz`, + false, // the receiver is outside the declaration module + ``, + }, + { + `foo.bar`, + `module.bar`, + `module.bar[1]`, + `module.boz`, + false, // the receiver is outside the declaration module + ``, + }, + { + `foo.bar`, + `module.a`, + `module.b`, + `module.boz`, + false, // the receiver is outside the declaration module + ``, + }, + { + ``, + `module.a1.module.a2`, + `module.b1.module.b2`, + `module.c`, + false, // the receiver is outside the declaration module + ``, + }, + { + ``, + `module.a1.module.a2[0]`, + `module.b1.module.b2[1]`, + `module.c`, + false, // the 
receiver is outside the declaration module + ``, + }, + { + ``, + `module.a1.module.a2`, + `module.b1.module.b2`, + `module.a1.module.b2`, + false, // the receiver is outside the declaration module + ``, + }, + { + ``, + `module.a1.module.a2`, + `module.b1.module.b2`, + `module.b1.module.a2`, + false, // the receiver is outside the declaration module + ``, + }, + { + ``, + `module.a1.module.a2[0]`, + `module.b1.module.b2[1]`, + `module.a1.module.b2[0]`, + false, // the receiver is outside the declaration module + ``, + }, + { + ``, + `foo_instance.bar`, + `foo_instance.baz`, + `module.foo`, + false, // a resource address can never match a module instance + ``, + }, + } + + for _, test := range tests { + t.Run( + fmt.Sprintf( + "%s: %s to %s with %s", + test.DeclModule, + test.StmtFrom, test.StmtTo, + test.Receiver, + ), + func(t *testing.T) { + + parseStmtEP := func(t *testing.T, input string) *MoveEndpoint { + t.Helper() + + traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(input), "", hcl.InitialPos) + if hclDiags.HasErrors() { + // We're not trying to test the HCL parser here, so any + // failures at this point are likely to be bugs in the + // test case itself. 
+ t.Fatalf("syntax error: %s", hclDiags.Error()) + } + + moveEp, diags := ParseMoveEndpoint(traversal) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err().Error()) + } + return moveEp + } + + fromEPLocal := parseStmtEP(t, test.StmtFrom) + toEPLocal := parseStmtEP(t, test.StmtTo) + + declModule := RootModule + if test.DeclModule != "" { + declModule = strings.Split(test.DeclModule, ".") + } + fromEP, toEP := UnifyMoveEndpoints(declModule, fromEPLocal, toEPLocal) + if fromEP == nil || toEP == nil { + t.Fatalf("invalid test case: non-unifyable endpoints\nfrom: %s\nto: %s", fromEPLocal, toEPLocal) + } + + receiverAddr := RootModuleInstance + if test.Receiver != "" { + var diags tfdiags.Diagnostics + receiverAddr, diags = ParseModuleInstanceStr(test.Receiver) + if diags.HasErrors() { + t.Fatalf("invalid reciever address: %s", diags.Err().Error()) + } + } + gotAddr, gotMatch := receiverAddr.MoveDestination(fromEP, toEP) + if !test.WantMatch { + if gotMatch { + t.Errorf("unexpected match\nreceiver: %s\nfrom: %s\nto: %s\nresult: %s", test.Receiver, fromEP, toEP, gotAddr) + } + return + } + + if !gotMatch { + t.Errorf("unexpected non-match\nreceiver: %s\nfrom: %s\nto: %s", test.Receiver, fromEP, toEP) + } + + if gotStr, wantStr := gotAddr.String(), test.WantResult; gotStr != wantStr { + t.Errorf("wrong result\ngot: %s\nwant: %s", gotStr, wantStr) + } + }, + ) + } +} + +func TestAbsResourceInstanceMoveDestination(t *testing.T) { + tests := []struct { + DeclModule string + StmtFrom, StmtTo string + Receiver string + WantMatch bool + WantResult string + }{ + { + ``, + `test_object.beep`, + `test_object.boop`, + `test_object.beep`, + true, + `test_object.boop`, + }, + { + ``, + `test_object.beep`, + `test_object.beep[2]`, + `test_object.beep`, + true, + `test_object.beep[2]`, + }, + { + ``, + `test_object.beep`, + `module.foo.test_object.beep`, + `test_object.beep`, + true, + `module.foo.test_object.beep`, + }, + { + ``, + `test_object.beep[2]`, + 
`module.foo.test_object.beep["a"]`,
+			`test_object.beep[2]`,
+			true,
+			`module.foo.test_object.beep["a"]`,
+		},
+		{
+			``,
+			`test_object.beep`,
+			`module.foo[0].test_object.beep`,
+			`test_object.beep`,
+			true,
+			`module.foo[0].test_object.beep`,
+		},
+		{
+			``,
+			`module.foo.test_object.beep`,
+			`test_object.beep`,
+			`module.foo.test_object.beep`,
+			true,
+			`test_object.beep`,
+		},
+		{
+			``,
+			`module.foo[0].test_object.beep`,
+			`test_object.beep`,
+			`module.foo[0].test_object.beep`,
+			true,
+			`test_object.beep`,
+		},
+		{
+			`foo`,
+			`test_object.beep`,
+			`test_object.boop`,
+			`module.foo[0].test_object.beep`,
+			true,
+			`module.foo[0].test_object.boop`,
+		},
+		{
+			`foo`,
+			`test_object.beep`,
+			`test_object.beep[1]`,
+			`module.foo[0].test_object.beep`,
+			true,
+			`module.foo[0].test_object.beep[1]`,
+		},
+		{
+			``,
+			`test_object.beep`,
+			`test_object.boop`,
+			`test_object.boop`,
+			false, // the receiver is already the "to" address
+			``,
+		},
+		{
+			``,
+			`test_object.beep[1]`,
+			`test_object.beep[2]`,
+			`test_object.beep[5]`,
+			false, // the receiver has a non-matching instance key
+			``,
+		},
+		{
+			`foo`,
+			`test_object.beep`,
+			`test_object.boop`,
+			`test_object.beep`,
+			false, // the receiver is not inside an instance of module "foo"
+			``,
+		},
+		{
+			`foo.bar`,
+			`test_object.beep`,
+			`test_object.boop`,
+			`test_object.beep`,
+			false, // the receiver is not inside an instance of module "foo.bar"
+			``,
+		},
+		{
+			``,
+			`module.foo[0].test_object.beep`,
+			`test_object.beep`,
+			`module.foo[1].test_object.beep`,
+			false, // receiver is in a different instance of module.foo
+			``,
+		},
+
+		// Moving a module also moves all of the resources declared within it.
+		// The following tests all cover variations of that rule.
+ { + ``, + `module.foo`, + `module.bar`, + `module.foo.test_object.beep`, + true, + `module.bar.test_object.beep`, + }, + { + ``, + `module.foo`, + `module.bar`, + `module.foo[1].test_object.beep`, + true, + `module.bar[1].test_object.beep`, + }, + { + ``, + `module.foo`, + `module.bar`, + `module.foo["a"].test_object.beep`, + true, + `module.bar["a"].test_object.beep`, + }, + { + ``, + `module.foo`, + `module.bar.module.foo`, + `module.foo.test_object.beep`, + true, + `module.bar.module.foo.test_object.beep`, + }, + { + ``, + `module.foo.module.bar`, + `module.bar`, + `module.foo.module.bar.test_object.beep`, + true, + `module.bar.test_object.beep`, + }, + { + ``, + `module.foo[1]`, + `module.foo[2]`, + `module.foo[1].test_object.beep`, + true, + `module.foo[2].test_object.beep`, + }, + { + ``, + `module.foo[1]`, + `module.foo`, + `module.foo[1].test_object.beep`, + true, + `module.foo.test_object.beep`, + }, + { + ``, + `module.foo`, + `module.foo[1]`, + `module.foo.test_object.beep`, + true, + `module.foo[1].test_object.beep`, + }, + { + ``, + `module.foo`, + `module.foo[1]`, + `module.foo.module.bar.test_object.beep`, + true, + `module.foo[1].module.bar.test_object.beep`, + }, + { + ``, + `module.foo`, + `module.foo[1]`, + `module.foo.module.bar[0].test_object.beep`, + true, + `module.foo[1].module.bar[0].test_object.beep`, + }, + { + ``, + `module.foo`, + `module.bar.module.foo`, + `module.foo[0].test_object.beep`, + true, + `module.bar.module.foo[0].test_object.beep`, + }, + { + ``, + `module.foo.module.bar`, + `module.bar`, + `module.foo.module.bar[0].test_object.beep`, + true, + `module.bar[0].test_object.beep`, + }, + { + `foo`, + `module.bar`, + `module.baz`, + `module.foo.module.bar.test_object.beep`, + true, + `module.foo.module.baz.test_object.beep`, + }, + { + `foo`, + `module.bar`, + `module.baz`, + `module.foo[1].module.bar.test_object.beep`, + true, + `module.foo[1].module.baz.test_object.beep`, + }, + { + `foo`, + `module.bar`, + `module.bar[1]`, 
+ `module.foo[1].module.bar.test_object.beep`, + true, + `module.foo[1].module.bar[1].test_object.beep`, + }, + { + ``, + `module.foo[1]`, + `module.foo[2]`, + `module.foo.test_object.beep`, + false, // the receiver module has a non-matching instance key (NoKey) + ``, + }, + { + ``, + `module.foo[1]`, + `module.foo[2]`, + `module.foo[2].test_object.beep`, + false, // the receiver is already at the "to" address + ``, + }, + { + `foo`, + `module.bar`, + `module.bar[1]`, + `module.boz.test_object.beep`, + false, // the receiver module is outside the declaration module + ``, + }, + { + `foo.bar`, + `module.bar`, + `module.bar[1]`, + `module.boz.test_object.beep`, + false, // the receiver module is outside the declaration module + ``, + }, + { + `foo.bar`, + `module.a`, + `module.b`, + `module.boz.test_object.beep`, + false, // the receiver module is outside the declaration module + ``, + }, + { + ``, + `module.a1.module.a2`, + `module.b1.module.b2`, + `module.c.test_object.beep`, + false, // the receiver module is outside the declaration module + ``, + }, + { + ``, + `module.a1.module.a2[0]`, + `module.b1.module.b2[1]`, + `module.c.test_object.beep`, + false, // the receiver module is outside the declaration module + ``, + }, + { + ``, + `module.a1.module.a2`, + `module.b1.module.b2`, + `module.a1.module.b2.test_object.beep`, + false, // the receiver module is outside the declaration module + ``, + }, + { + ``, + `module.a1.module.a2`, + `module.b1.module.b2`, + `module.b1.module.a2.test_object.beep`, + false, // the receiver module is outside the declaration module + ``, + }, + { + ``, + `module.a1.module.a2[0]`, + `module.b1.module.b2[1]`, + `module.a1.module.b2[0].test_object.beep`, + false, // the receiver module is outside the declaration module + ``, + }, + { + ``, + `foo_instance.bar`, + `foo_instance.baz`, + `module.foo.test_object.beep`, + false, // the resource address is unrelated to the move statements + ``, + }, + } + + for _, test := range tests { + 
t.Run( + fmt.Sprintf( + "%s: %s to %s with %s", + test.DeclModule, + test.StmtFrom, test.StmtTo, + test.Receiver, + ), + func(t *testing.T) { + + parseStmtEP := func(t *testing.T, input string) *MoveEndpoint { + t.Helper() + + traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(input), "", hcl.InitialPos) + if hclDiags.HasErrors() { + // We're not trying to test the HCL parser here, so any + // failures at this point are likely to be bugs in the + // test case itself. + t.Fatalf("syntax error: %s", hclDiags.Error()) + } + + moveEp, diags := ParseMoveEndpoint(traversal) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err().Error()) + } + return moveEp + } + + fromEPLocal := parseStmtEP(t, test.StmtFrom) + toEPLocal := parseStmtEP(t, test.StmtTo) + + declModule := RootModule + if test.DeclModule != "" { + declModule = strings.Split(test.DeclModule, ".") + } + fromEP, toEP := UnifyMoveEndpoints(declModule, fromEPLocal, toEPLocal) + if fromEP == nil || toEP == nil { + t.Fatalf("invalid test case: non-unifyable endpoints\nfrom: %s\nto: %s", fromEPLocal, toEPLocal) + } + + receiverAddr, diags := ParseAbsResourceInstanceStr(test.Receiver) + if diags.HasErrors() { + t.Fatalf("invalid reciever address: %s", diags.Err().Error()) + } + gotAddr, gotMatch := receiverAddr.MoveDestination(fromEP, toEP) + if !test.WantMatch { + if gotMatch { + t.Errorf("unexpected match\nreceiver: %s\nfrom: %s\nto: %s\nresult: %s", test.Receiver, fromEP, toEP, gotAddr) + } + return + } + + if !gotMatch { + t.Fatalf("unexpected non-match\nreceiver: %s (%T)\nfrom: %s\nto: %s\ngot: (no match)\nwant: %s", test.Receiver, receiverAddr, fromEP, toEP, test.WantResult) + } + + if gotStr, wantStr := gotAddr.String(), test.WantResult; gotStr != wantStr { + t.Errorf("wrong result\ngot: %s\nwant: %s", gotStr, wantStr) + } + }, + ) + } +} + +func TestAbsResourceMoveDestination(t *testing.T) { + tests := []struct { + DeclModule string + StmtFrom, StmtTo string + Receiver string + 
WantMatch bool
+		WantResult       string
+	}{
+		{
+			``,
+			`test_object.beep`,
+			`test_object.boop`,
+			`test_object.beep`,
+			true,
+			`test_object.boop`,
+		},
+		{
+			``,
+			`test_object.beep`,
+			`module.foo.test_object.beep`,
+			`test_object.beep`,
+			true,
+			`module.foo.test_object.beep`,
+		},
+		{
+			``,
+			`test_object.beep`,
+			`module.foo[0].test_object.beep`,
+			`test_object.beep`,
+			true,
+			`module.foo[0].test_object.beep`,
+		},
+		{
+			``,
+			`module.foo.test_object.beep`,
+			`test_object.beep`,
+			`module.foo.test_object.beep`,
+			true,
+			`test_object.beep`,
+		},
+		{
+			``,
+			`module.foo[0].test_object.beep`,
+			`test_object.beep`,
+			`module.foo[0].test_object.beep`,
+			true,
+			`test_object.beep`,
+		},
+		{
+			`foo`,
+			`test_object.beep`,
+			`test_object.boop`,
+			`module.foo[0].test_object.beep`,
+			true,
+			`module.foo[0].test_object.boop`,
+		},
+		{
+			``,
+			`test_object.beep`,
+			`test_object.boop`,
+			`test_object.boop`,
+			false, // the receiver is already the "to" address
+			``,
+		},
+		{
+			`foo`,
+			`test_object.beep`,
+			`test_object.boop`,
+			`test_object.beep`,
+			false, // the receiver is not inside an instance of module "foo"
+			``,
+		},
+		{
+			`foo.bar`,
+			`test_object.beep`,
+			`test_object.boop`,
+			`test_object.beep`,
+			false, // the receiver is not inside an instance of module "foo.bar"
+			``,
+		},
+		{
+			``,
+			`module.foo[0].test_object.beep`,
+			`test_object.beep`,
+			`module.foo[1].test_object.beep`,
+			false, // receiver is in a different instance of module.foo
+			``,
+		},
+
+		// Moving a module also moves all of the resources declared within it.
+		// The following tests all cover variations of that rule.
+ { + ``, + `module.foo`, + `module.bar`, + `module.foo.test_object.beep`, + true, + `module.bar.test_object.beep`, + }, + { + ``, + `module.foo`, + `module.bar`, + `module.foo[1].test_object.beep`, + true, + `module.bar[1].test_object.beep`, + }, + { + ``, + `module.foo`, + `module.bar`, + `module.foo["a"].test_object.beep`, + true, + `module.bar["a"].test_object.beep`, + }, + { + ``, + `module.foo`, + `module.bar.module.foo`, + `module.foo.test_object.beep`, + true, + `module.bar.module.foo.test_object.beep`, + }, + { + ``, + `module.foo.module.bar`, + `module.bar`, + `module.foo.module.bar.test_object.beep`, + true, + `module.bar.test_object.beep`, + }, + { + ``, + `module.foo[1]`, + `module.foo[2]`, + `module.foo[1].test_object.beep`, + true, + `module.foo[2].test_object.beep`, + }, + { + ``, + `module.foo[1]`, + `module.foo`, + `module.foo[1].test_object.beep`, + true, + `module.foo.test_object.beep`, + }, + { + ``, + `module.foo`, + `module.foo[1]`, + `module.foo.test_object.beep`, + true, + `module.foo[1].test_object.beep`, + }, + { + ``, + `module.foo`, + `module.foo[1]`, + `module.foo.module.bar.test_object.beep`, + true, + `module.foo[1].module.bar.test_object.beep`, + }, + { + ``, + `module.foo`, + `module.foo[1]`, + `module.foo.module.bar[0].test_object.beep`, + true, + `module.foo[1].module.bar[0].test_object.beep`, + }, + { + ``, + `module.foo`, + `module.bar.module.foo`, + `module.foo[0].test_object.beep`, + true, + `module.bar.module.foo[0].test_object.beep`, + }, + { + ``, + `module.foo.module.bar`, + `module.bar`, + `module.foo.module.bar[0].test_object.beep`, + true, + `module.bar[0].test_object.beep`, + }, + { + `foo`, + `module.bar`, + `module.baz`, + `module.foo.module.bar.test_object.beep`, + true, + `module.foo.module.baz.test_object.beep`, + }, + { + `foo`, + `module.bar`, + `module.baz`, + `module.foo[1].module.bar.test_object.beep`, + true, + `module.foo[1].module.baz.test_object.beep`, + }, + { + `foo`, + `module.bar`, + `module.bar[1]`, 
+ `module.foo[1].module.bar.test_object.beep`, + true, + `module.foo[1].module.bar[1].test_object.beep`, + }, + { + ``, + `module.foo[1]`, + `module.foo[2]`, + `module.foo.test_object.beep`, + false, // the receiver module has a non-matching instance key (NoKey) + ``, + }, + { + ``, + `module.foo[1]`, + `module.foo[2]`, + `module.foo[2].test_object.beep`, + false, // the receiver is already at the "to" address + ``, + }, + { + `foo`, + `module.bar`, + `module.bar[1]`, + `module.boz.test_object.beep`, + false, // the receiver module is outside the declaration module + ``, + }, + { + `foo.bar`, + `module.bar`, + `module.bar[1]`, + `module.boz.test_object.beep`, + false, // the receiver module is outside the declaration module + ``, + }, + { + `foo.bar`, + `module.a`, + `module.b`, + `module.boz.test_object.beep`, + false, // the receiver module is outside the declaration module + ``, + }, + { + ``, + `module.a1.module.a2`, + `module.b1.module.b2`, + `module.c.test_object.beep`, + false, // the receiver module is outside the declaration module + ``, + }, + { + ``, + `module.a1.module.a2[0]`, + `module.b1.module.b2[1]`, + `module.c.test_object.beep`, + false, // the receiver module is outside the declaration module + ``, + }, + { + ``, + `module.a1.module.a2`, + `module.b1.module.b2`, + `module.a1.module.b2.test_object.beep`, + false, // the receiver module is outside the declaration module + ``, + }, + { + ``, + `module.a1.module.a2`, + `module.b1.module.b2`, + `module.b1.module.a2.test_object.beep`, + false, // the receiver module is outside the declaration module + ``, + }, + { + ``, + `module.a1.module.a2[0]`, + `module.b1.module.b2[1]`, + `module.a1.module.b2[0].test_object.beep`, + false, // the receiver module is outside the declaration module + ``, + }, + { + ``, + `foo_instance.bar`, + `foo_instance.baz`, + `module.foo.test_object.beep`, + false, // the resource address is unrelated to the move statements + ``, + }, + } + + for i, test := range tests { + 
t.Run( + fmt.Sprintf( + "[%02d] %s: %s to %s with %s", + i, + test.DeclModule, + test.StmtFrom, test.StmtTo, + test.Receiver, + ), + func(t *testing.T) { + + parseStmtEP := func(t *testing.T, input string) *MoveEndpoint { + t.Helper() + + traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(input), "", hcl.InitialPos) + if hclDiags.HasErrors() { + // We're not trying to test the HCL parser here, so any + // failures at this point are likely to be bugs in the + // test case itself. + t.Fatalf("syntax error: %s", hclDiags.Error()) + } + + moveEp, diags := ParseMoveEndpoint(traversal) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err().Error()) + } + return moveEp + } + + fromEPLocal := parseStmtEP(t, test.StmtFrom) + toEPLocal := parseStmtEP(t, test.StmtTo) + + declModule := RootModule + if test.DeclModule != "" { + declModule = strings.Split(test.DeclModule, ".") + } + fromEP, toEP := UnifyMoveEndpoints(declModule, fromEPLocal, toEPLocal) + if fromEP == nil || toEP == nil { + t.Fatalf("invalid test case: non-unifyable endpoints\nfrom: %s\nto: %s", fromEPLocal, toEPLocal) + } + + // We only have an AbsResourceInstance parser, not an + // AbsResourceParser, and so we'll just cheat and parse this + // as a resource instance but fail if it includes an instance + // key. 
+ receiverInstanceAddr, diags := ParseAbsResourceInstanceStr(test.Receiver) + if diags.HasErrors() { + t.Fatalf("invalid reciever address: %s", diags.Err().Error()) + } + if receiverInstanceAddr.Resource.Key != NoKey { + t.Fatalf("invalid reciever address: must be a resource, not a resource instance") + } + receiverAddr := receiverInstanceAddr.ContainingResource() + gotAddr, gotMatch := receiverAddr.MoveDestination(fromEP, toEP) + if !test.WantMatch { + if gotMatch { + t.Errorf("unexpected match\nreceiver: %s (%T)\nfrom: %s\nto: %s\nresult: %s", test.Receiver, receiverAddr, fromEP, toEP, gotAddr) + } + return + } + + if !gotMatch { + t.Fatalf("unexpected non-match\nreceiver: %s (%T)\nfrom: %s\nto: %s\ngot: no match\nwant: %s", test.Receiver, receiverAddr, fromEP, toEP, test.WantResult) + } + + if gotStr, wantStr := gotAddr.String(), test.WantResult; gotStr != wantStr { + t.Errorf("wrong result\ngot: %s\nwant: %s", gotStr, wantStr) + } + }, + ) + } +} + +func TestMoveEndpointChainAndNested(t *testing.T) { + tests := []struct { + Endpoint, Other AbsMoveable + EndpointMod, OtherMod Module + CanChainFrom, NestedWithin bool + }{ + { + Endpoint: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.foo[2]"), + Call: ModuleCall{Name: "bar"}, + }, + Other: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.foo[2]"), + Call: ModuleCall{Name: "bar"}, + }, + CanChainFrom: true, + NestedWithin: false, + }, + + { + Endpoint: mustParseModuleInstanceStr("module.foo[2]"), + Other: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.foo[2]"), + Call: ModuleCall{Name: "bar"}, + }, + CanChainFrom: false, + NestedWithin: false, + }, + + { + Endpoint: mustParseModuleInstanceStr("module.foo[2].module.bar[2]"), + Other: AbsModuleCall{ + Module: RootModuleInstance, + Call: ModuleCall{Name: "foo"}, + }, + CanChainFrom: false, + NestedWithin: true, + }, + + { + Endpoint: mustParseAbsResourceInstanceStr("module.foo[2].module.bar.resource.baz").ContainingResource(), + 
Other: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.foo[2]"), + Call: ModuleCall{Name: "bar"}, + }, + CanChainFrom: false, + NestedWithin: true, + }, + + { + Endpoint: mustParseAbsResourceInstanceStr("module.foo[2].module.bar[3].resource.baz[2]"), + Other: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.foo[2]"), + Call: ModuleCall{Name: "bar"}, + }, + CanChainFrom: false, + NestedWithin: true, + }, + + { + Endpoint: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.foo[2]"), + Call: ModuleCall{Name: "bar"}, + }, + Other: mustParseModuleInstanceStr("module.foo[2]"), + CanChainFrom: false, + NestedWithin: true, + }, + + { + Endpoint: mustParseModuleInstanceStr("module.foo[2]"), + Other: mustParseModuleInstanceStr("module.foo[2]"), + CanChainFrom: true, + NestedWithin: false, + }, + + { + Endpoint: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz").ContainingResource(), + Other: mustParseModuleInstanceStr("module.foo[2]"), + CanChainFrom: false, + NestedWithin: true, + }, + + { + Endpoint: mustParseAbsResourceInstanceStr("module.foo[2].module.bar.resource.baz"), + Other: mustParseModuleInstanceStr("module.foo[2]"), + CanChainFrom: false, + NestedWithin: true, + }, + + { + Endpoint: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.foo[2]"), + Call: ModuleCall{Name: "bar"}, + }, + Other: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz").ContainingResource(), + CanChainFrom: false, + NestedWithin: false, + }, + + { + Endpoint: mustParseModuleInstanceStr("module.foo[2]"), + Other: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz").ContainingResource(), + CanChainFrom: false, + NestedWithin: false, + }, + + { + Endpoint: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz").ContainingResource(), + Other: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz").ContainingResource(), + CanChainFrom: true, + NestedWithin: false, + }, + + { + Endpoint: 
mustParseAbsResourceInstanceStr("module.foo[2].resource.baz"), + Other: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz[2]").ContainingResource(), + CanChainFrom: false, + NestedWithin: true, + }, + + { + Endpoint: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.foo[2]"), + Call: ModuleCall{Name: "bar"}, + }, + Other: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz"), + CanChainFrom: false, + }, + + { + Endpoint: mustParseModuleInstanceStr("module.foo[2]"), + Other: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz"), + CanChainFrom: false, + }, + { + Endpoint: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz").ContainingResource(), + Other: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz"), + CanChainFrom: false, + }, + + { + Endpoint: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz"), + Other: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz"), + CanChainFrom: true, + }, + + { + Endpoint: mustParseAbsResourceInstanceStr("resource.baz"), + EndpointMod: Module{"foo"}, + Other: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz"), + CanChainFrom: true, + }, + + { + Endpoint: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz"), + Other: mustParseAbsResourceInstanceStr("resource.baz"), + OtherMod: Module{"foo"}, + CanChainFrom: true, + }, + + { + Endpoint: mustParseAbsResourceInstanceStr("resource.baz"), + EndpointMod: Module{"foo"}, + Other: mustParseAbsResourceInstanceStr("resource.baz"), + OtherMod: Module{"foo"}, + CanChainFrom: true, + }, + + { + Endpoint: mustParseAbsResourceInstanceStr("resource.baz").ContainingResource(), + EndpointMod: Module{"foo"}, + Other: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz").ContainingResource(), + CanChainFrom: true, + }, + + { + Endpoint: mustParseModuleInstanceStr("module.foo[2].module.baz"), + Other: mustParseModuleInstanceStr("module.baz"), + OtherMod: Module{"foo"}, + CanChainFrom: true, + }, + + { 
+ Endpoint: AbsModuleCall{ + Call: ModuleCall{Name: "bing"}, + }, + EndpointMod: Module{"foo", "baz"}, + Other: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.baz"), + Call: ModuleCall{Name: "bing"}, + }, + OtherMod: Module{"foo"}, + CanChainFrom: true, + }, + + { + Endpoint: mustParseAbsResourceInstanceStr("resource.baz"), + EndpointMod: Module{"foo"}, + Other: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz").ContainingResource(), + NestedWithin: true, + }, + + { + Endpoint: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz"), + Other: mustParseAbsResourceInstanceStr("resource.baz").ContainingResource(), + OtherMod: Module{"foo"}, + NestedWithin: true, + }, + + { + Endpoint: mustParseAbsResourceInstanceStr("resource.baz"), + EndpointMod: Module{"foo"}, + Other: mustParseAbsResourceInstanceStr("resource.baz").ContainingResource(), + OtherMod: Module{"foo"}, + NestedWithin: true, + }, + + { + Endpoint: mustParseAbsResourceInstanceStr("ressurce.baz").ContainingResource(), + EndpointMod: Module{"foo"}, + Other: mustParseModuleInstanceStr("module.foo[2]"), + NestedWithin: true, + }, + + { + Endpoint: AbsModuleCall{ + Call: ModuleCall{Name: "bang"}, + }, + EndpointMod: Module{"foo", "baz", "bing"}, + Other: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.baz"), + Call: ModuleCall{Name: "bing"}, + }, + OtherMod: Module{"foo"}, + NestedWithin: true, + }, + + { + Endpoint: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.bing"), + Call: ModuleCall{Name: "bang"}, + }, + EndpointMod: Module{"foo", "baz"}, + Other: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.foo.module.baz"), + Call: ModuleCall{Name: "bing"}, + }, + NestedWithin: true, + }, + } + + for i, test := range tests { + t.Run(fmt.Sprintf("[%02d]%s.CanChainFrom(%s)", i, test.Endpoint, test.Other), + func(t *testing.T) { + endpoint := &MoveEndpointInModule{ + relSubject: test.Endpoint, + module: test.EndpointMod, + } + + other := 
&MoveEndpointInModule{ + relSubject: test.Other, + module: test.OtherMod, + } + + if endpoint.CanChainFrom(other) != test.CanChainFrom { + t.Errorf("expected %s CanChainFrom %s == %t", endpoint, other, test.CanChainFrom) + } + + if endpoint.NestedWithin(other) != test.NestedWithin { + t.Errorf("expected %s NestedWithin %s == %t", endpoint, other, test.NestedWithin) + } + }, + ) + } +} + +func TestSelectsModule(t *testing.T) { + tests := []struct { + Endpoint *MoveEndpointInModule + Addr ModuleInstance + Selects bool + }{ + { + Endpoint: &MoveEndpointInModule{ + relSubject: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.foo[2]"), + Call: ModuleCall{Name: "bar"}, + }, + }, + Addr: mustParseModuleInstanceStr("module.foo[2].module.bar[1]"), + Selects: true, + }, + { + Endpoint: &MoveEndpointInModule{ + module: mustParseModuleInstanceStr("module.foo").Module(), + relSubject: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.bar[2]"), + Call: ModuleCall{Name: "baz"}, + }, + }, + Addr: mustParseModuleInstanceStr("module.foo[2].module.bar[2].module.baz"), + Selects: true, + }, + { + Endpoint: &MoveEndpointInModule{ + module: mustParseModuleInstanceStr("module.foo").Module(), + relSubject: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.bar[2]"), + Call: ModuleCall{Name: "baz"}, + }, + }, + Addr: mustParseModuleInstanceStr("module.foo[2].module.bar[1].module.baz"), + Selects: false, + }, + { + Endpoint: &MoveEndpointInModule{ + relSubject: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.bar"), + Call: ModuleCall{Name: "baz"}, + }, + }, + Addr: mustParseModuleInstanceStr("module.bar[1].module.baz"), + Selects: false, + }, + { + Endpoint: &MoveEndpointInModule{ + module: mustParseModuleInstanceStr("module.foo").Module(), + relSubject: mustParseAbsResourceInstanceStr(`module.bar.resource.name["key"]`), + }, + Addr: mustParseModuleInstanceStr(`module.foo[1].module.bar`), + Selects: true, + }, + { + Endpoint: &MoveEndpointInModule{ 
+ relSubject: mustParseModuleInstanceStr(`module.bar.module.baz["key"]`), + }, + Addr: mustParseModuleInstanceStr(`module.bar.module.baz["key"]`), + Selects: true, + }, + { + Endpoint: &MoveEndpointInModule{ + relSubject: mustParseAbsResourceInstanceStr(`module.bar.module.baz["key"].resource.name`).ContainingResource(), + }, + Addr: mustParseModuleInstanceStr(`module.bar.module.baz["key"]`), + Selects: true, + }, + { + Endpoint: &MoveEndpointInModule{ + module: mustParseModuleInstanceStr("module.nope").Module(), + relSubject: mustParseAbsResourceInstanceStr(`module.bar.resource.name["key"]`), + }, + Addr: mustParseModuleInstanceStr(`module.foo[1].module.bar`), + Selects: false, + }, + { + Endpoint: &MoveEndpointInModule{ + relSubject: mustParseModuleInstanceStr(`module.bar.module.baz["key"]`), + }, + Addr: mustParseModuleInstanceStr(`module.bar.module.baz["nope"]`), + Selects: false, + }, + { + Endpoint: &MoveEndpointInModule{ + relSubject: mustParseAbsResourceInstanceStr(`module.nope.module.baz["key"].resource.name`).ContainingResource(), + }, + Addr: mustParseModuleInstanceStr(`module.bar.module.baz["key"]`), + Selects: false, + }, + } + + for i, test := range tests { + t.Run(fmt.Sprintf("[%02d]%s.SelectsModule(%s)", i, test.Endpoint, test.Addr), + func(t *testing.T) { + if test.Endpoint.SelectsModule(test.Addr) != test.Selects { + t.Errorf("expected %s SelectsModule %s == %t", test.Endpoint, test.Addr, test.Selects) + } + }, + ) + } +} + +func TestSelectsResource(t *testing.T) { + matchingResource := Resource{ + Mode: ManagedResourceMode, + Type: "foo", + Name: "matching", + } + unmatchingResource := Resource{ + Mode: ManagedResourceMode, + Type: "foo", + Name: "unmatching", + } + childMod := Module{ + "child", + } + childModMatchingInst := ModuleInstance{ + ModuleInstanceStep{Name: "child", InstanceKey: StringKey("matching")}, + } + childModUnmatchingInst := ModuleInstance{ + ModuleInstanceStep{Name: "child", InstanceKey: StringKey("unmatching")}, + } + + tests 
:= []struct { + Endpoint *MoveEndpointInModule + Addr AbsResource + Selects bool + }{ + { + Endpoint: &MoveEndpointInModule{ + relSubject: matchingResource.Absolute(nil), + }, + Addr: matchingResource.Absolute(nil), + Selects: true, // exact match + }, + { + Endpoint: &MoveEndpointInModule{ + relSubject: unmatchingResource.Absolute(nil), + }, + Addr: matchingResource.Absolute(nil), + Selects: false, // wrong resource name + }, + { + Endpoint: &MoveEndpointInModule{ + relSubject: unmatchingResource.Instance(IntKey(1)).Absolute(nil), + }, + Addr: matchingResource.Absolute(nil), + Selects: false, // wrong resource name + }, + { + Endpoint: &MoveEndpointInModule{ + relSubject: matchingResource.Instance(NoKey).Absolute(nil), + }, + Addr: matchingResource.Absolute(nil), + Selects: true, // matches one instance + }, + { + Endpoint: &MoveEndpointInModule{ + relSubject: matchingResource.Instance(IntKey(0)).Absolute(nil), + }, + Addr: matchingResource.Absolute(nil), + Selects: true, // matches one instance + }, + { + Endpoint: &MoveEndpointInModule{ + relSubject: matchingResource.Instance(StringKey("a")).Absolute(nil), + }, + Addr: matchingResource.Absolute(nil), + Selects: true, // matches one instance + }, + { + Endpoint: &MoveEndpointInModule{ + module: childMod, + relSubject: matchingResource.Absolute(nil), + }, + Addr: matchingResource.Absolute(childModMatchingInst), + Selects: true, // in one of the instances of the module where the statement was written + }, + { + Endpoint: &MoveEndpointInModule{ + relSubject: matchingResource.Absolute(childModMatchingInst), + }, + Addr: matchingResource.Absolute(childModMatchingInst), + Selects: true, // exact match + }, + { + Endpoint: &MoveEndpointInModule{ + relSubject: matchingResource.Instance(IntKey(2)).Absolute(childModMatchingInst), + }, + Addr: matchingResource.Absolute(childModMatchingInst), + Selects: true, // matches one instance + }, + { + Endpoint: &MoveEndpointInModule{ + relSubject: 
matchingResource.Absolute(childModMatchingInst), + }, + Addr: matchingResource.Absolute(childModUnmatchingInst), + Selects: false, // the containing module instance doesn't match + }, + { + Endpoint: &MoveEndpointInModule{ + relSubject: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.foo[2]"), + Call: ModuleCall{Name: "bar"}, + }, + }, + Addr: matchingResource.Absolute(mustParseModuleInstanceStr("module.foo[2]")), + Selects: false, // a module call can't match a resource + }, + { + Endpoint: &MoveEndpointInModule{ + relSubject: mustParseModuleInstanceStr("module.foo[2]"), + }, + Addr: matchingResource.Absolute(mustParseModuleInstanceStr("module.foo[2]")), + Selects: false, // a module instance can't match a resource + }, + } + + for i, test := range tests { + t.Run(fmt.Sprintf("[%02d]%s SelectsResource(%s)", i, test.Endpoint, test.Addr), + func(t *testing.T) { + if got, want := test.Endpoint.SelectsResource(test.Addr), test.Selects; got != want { + t.Errorf("wrong result\nReceiver: %s\nArgument: %s\ngot: %t\nwant: %t", test.Endpoint, test.Addr, got, want) + } + }, + ) + } +} + +func TestIsModuleMoveReIndex(t *testing.T) { + tests := []struct { + from, to AbsMoveable + expect bool + }{ + { + from: mustParseModuleInstanceStr(`module.bar`), + to: mustParseModuleInstanceStr(`module.bar`), + expect: true, + }, + { + from: mustParseModuleInstanceStr(`module.bar`), + to: mustParseModuleInstanceStr(`module.bar[0]`), + expect: true, + }, + { + from: AbsModuleCall{ + Call: ModuleCall{Name: "bar"}, + }, + to: mustParseModuleInstanceStr(`module.bar[0]`), + expect: true, + }, + { + from: mustParseModuleInstanceStr(`module.bar["a"]`), + to: AbsModuleCall{ + Call: ModuleCall{Name: "bar"}, + }, + expect: true, + }, + { + from: mustParseModuleInstanceStr(`module.foo`), + to: mustParseModuleInstanceStr(`module.bar`), + expect: false, + }, + { + from: mustParseModuleInstanceStr(`module.bar`), + to: mustParseModuleInstanceStr(`module.foo[0]`), + expect: false, + }, + { + 
from: AbsModuleCall{ + Call: ModuleCall{Name: "bar"}, + }, + to: mustParseModuleInstanceStr(`module.foo[0]`), + expect: false, + }, + { + from: mustParseModuleInstanceStr(`module.bar["a"]`), + to: AbsModuleCall{ + Call: ModuleCall{Name: "foo"}, + }, + expect: false, + }, + { + from: mustParseModuleInstanceStr(`module.bar.module.baz`), + to: mustParseModuleInstanceStr(`module.bar.module.baz`), + expect: true, + }, + { + from: mustParseModuleInstanceStr(`module.bar.module.baz`), + to: mustParseModuleInstanceStr(`module.bar.module.baz[0]`), + expect: true, + }, + { + from: mustParseModuleInstanceStr(`module.bar.module.baz`), + to: mustParseModuleInstanceStr(`module.baz.module.baz`), + expect: false, + }, + { + from: mustParseModuleInstanceStr(`module.bar.module.baz`), + to: mustParseModuleInstanceStr(`module.baz.module.baz[0]`), + expect: false, + }, + { + from: mustParseModuleInstanceStr(`module.bar.module.baz`), + to: mustParseModuleInstanceStr(`module.bar[0].module.baz`), + expect: true, + }, + { + from: mustParseModuleInstanceStr(`module.bar[0].module.baz`), + to: mustParseModuleInstanceStr(`module.bar.module.baz[0]`), + expect: true, + }, + { + from: mustParseModuleInstanceStr(`module.bar[0].module.baz`), + to: mustParseModuleInstanceStr(`module.bar[1].module.baz[0]`), + expect: true, + }, + { + from: AbsModuleCall{ + Call: ModuleCall{Name: "baz"}, + }, + to: mustParseModuleInstanceStr(`module.bar.module.baz[0]`), + expect: false, + }, + { + from: mustParseModuleInstanceStr(`module.bar.module.baz[0]`), + to: AbsModuleCall{ + Call: ModuleCall{Name: "baz"}, + }, + expect: false, + }, + + { + from: AbsModuleCall{ + Module: mustParseModuleInstanceStr(`module.bar[0]`), + Call: ModuleCall{Name: "baz"}, + }, + to: mustParseModuleInstanceStr(`module.bar.module.baz[0]`), + expect: true, + }, + + { + from: mustParseModuleInstanceStr(`module.bar.module.baz[0]`), + to: AbsModuleCall{ + Module: mustParseModuleInstanceStr(`module.bar[0]`), + Call: ModuleCall{Name: "baz"}, + }, 
+ expect: true, + }, + + { + from: mustParseModuleInstanceStr(`module.baz`), + to: mustParseModuleInstanceStr(`module.bar.module.baz[0]`), + expect: false, + }, + { + from: mustParseModuleInstanceStr(`module.bar.module.baz[0]`), + to: mustParseModuleInstanceStr(`module.baz`), + expect: false, + }, + } + + for i, test := range tests { + t.Run(fmt.Sprintf("[%02d]IsModuleMoveReIndex(%s, %s)", i, test.from, test.to), + func(t *testing.T) { + from := &MoveEndpointInModule{ + relSubject: test.from, + } + + to := &MoveEndpointInModule{ + relSubject: test.to, + } + + if got := from.IsModuleReIndex(to); got != test.expect { + t.Errorf("expected %t, got %t", test.expect, got) + } + }, + ) + } +} + +func mustParseAbsResourceInstanceStr(s string) AbsResourceInstance { + r, diags := ParseAbsResourceInstanceStr(s) + if diags.HasErrors() { + panic(diags.ErrWithWarnings().Error()) + } + return r +} diff --git a/internal/terraform/addrs/move_endpoint_test.go b/internal/terraform/addrs/move_endpoint_test.go new file mode 100644 index 00000000..602c7e97 --- /dev/null +++ b/internal/terraform/addrs/move_endpoint_test.go @@ -0,0 +1,632 @@ +package addrs + +import ( + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" +) + +func TestParseMoveEndpoint(t *testing.T) { + tests := []struct { + Input string + WantRel AbsMoveable // funny intermediate subset of AbsMoveable + WantErr string + }{ + { + `foo.bar`, + AbsResourceInstance{ + Module: RootModuleInstance, + Resource: ResourceInstance{ + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "foo", + Name: "bar", + }, + Key: NoKey, + }, + }, + ``, + }, + { + `foo.bar[0]`, + AbsResourceInstance{ + Module: RootModuleInstance, + Resource: ResourceInstance{ + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "foo", + Name: "bar", + }, + Key: IntKey(0), + }, + }, + ``, + }, + { + `foo.bar["a"]`, + AbsResourceInstance{ + Module: RootModuleInstance, + 
Resource: ResourceInstance{ + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "foo", + Name: "bar", + }, + Key: StringKey("a"), + }, + }, + ``, + }, + { + `module.boop.foo.bar`, + AbsResourceInstance{ + Module: ModuleInstance{ + ModuleInstanceStep{Name: "boop"}, + }, + Resource: ResourceInstance{ + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "foo", + Name: "bar", + }, + Key: NoKey, + }, + }, + ``, + }, + { + `module.boop.foo.bar[0]`, + AbsResourceInstance{ + Module: ModuleInstance{ + ModuleInstanceStep{Name: "boop"}, + }, + Resource: ResourceInstance{ + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "foo", + Name: "bar", + }, + Key: IntKey(0), + }, + }, + ``, + }, + { + `module.boop.foo.bar["a"]`, + AbsResourceInstance{ + Module: ModuleInstance{ + ModuleInstanceStep{Name: "boop"}, + }, + Resource: ResourceInstance{ + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "foo", + Name: "bar", + }, + Key: StringKey("a"), + }, + }, + ``, + }, + { + `data.foo.bar`, + AbsResourceInstance{ + Module: RootModuleInstance, + Resource: ResourceInstance{ + Resource: Resource{ + Mode: DataResourceMode, + Type: "foo", + Name: "bar", + }, + Key: NoKey, + }, + }, + ``, + }, + { + `data.foo.bar[0]`, + AbsResourceInstance{ + Module: RootModuleInstance, + Resource: ResourceInstance{ + Resource: Resource{ + Mode: DataResourceMode, + Type: "foo", + Name: "bar", + }, + Key: IntKey(0), + }, + }, + ``, + }, + { + `data.foo.bar["a"]`, + AbsResourceInstance{ + Module: RootModuleInstance, + Resource: ResourceInstance{ + Resource: Resource{ + Mode: DataResourceMode, + Type: "foo", + Name: "bar", + }, + Key: StringKey("a"), + }, + }, + ``, + }, + { + `module.boop.data.foo.bar`, + AbsResourceInstance{ + Module: ModuleInstance{ + ModuleInstanceStep{Name: "boop"}, + }, + Resource: ResourceInstance{ + Resource: Resource{ + Mode: DataResourceMode, + Type: "foo", + Name: "bar", + }, + Key: NoKey, + }, + }, + ``, + }, + { + `module.boop.data.foo.bar[0]`, + 
AbsResourceInstance{ + Module: ModuleInstance{ + ModuleInstanceStep{Name: "boop"}, + }, + Resource: ResourceInstance{ + Resource: Resource{ + Mode: DataResourceMode, + Type: "foo", + Name: "bar", + }, + Key: IntKey(0), + }, + }, + ``, + }, + { + `module.boop.data.foo.bar["a"]`, + AbsResourceInstance{ + Module: ModuleInstance{ + ModuleInstanceStep{Name: "boop"}, + }, + Resource: ResourceInstance{ + Resource: Resource{ + Mode: DataResourceMode, + Type: "foo", + Name: "bar", + }, + Key: StringKey("a"), + }, + }, + ``, + }, + { + `module.foo`, + ModuleInstance{ + ModuleInstanceStep{Name: "foo"}, + }, + ``, + }, + { + `module.foo[0]`, + ModuleInstance{ + ModuleInstanceStep{Name: "foo", InstanceKey: IntKey(0)}, + }, + ``, + }, + { + `module.foo["a"]`, + ModuleInstance{ + ModuleInstanceStep{Name: "foo", InstanceKey: StringKey("a")}, + }, + ``, + }, + { + `module.foo.module.bar`, + ModuleInstance{ + ModuleInstanceStep{Name: "foo"}, + ModuleInstanceStep{Name: "bar"}, + }, + ``, + }, + { + `module.foo[1].module.bar`, + ModuleInstance{ + ModuleInstanceStep{Name: "foo", InstanceKey: IntKey(1)}, + ModuleInstanceStep{Name: "bar"}, + }, + ``, + }, + { + `module.foo.module.bar[1]`, + ModuleInstance{ + ModuleInstanceStep{Name: "foo"}, + ModuleInstanceStep{Name: "bar", InstanceKey: IntKey(1)}, + }, + ``, + }, + { + `module.foo[0].module.bar[1]`, + ModuleInstance{ + ModuleInstanceStep{Name: "foo", InstanceKey: IntKey(0)}, + ModuleInstanceStep{Name: "bar", InstanceKey: IntKey(1)}, + }, + ``, + }, + { + `module`, + nil, + `Invalid address operator: Prefix "module." must be followed by a module name.`, + }, + { + `module[0]`, + nil, + `Invalid address operator: Prefix "module." 
must be followed by a module name.`, + }, + { + `module.foo.data`, + nil, + `Invalid address: Resource specification must include a resource type and name.`, + }, + { + `module.foo.data.bar`, + nil, + `Invalid address: Resource specification must include a resource type and name.`, + }, + { + `module.foo.data[0]`, + nil, + `Invalid address: Resource specification must include a resource type and name.`, + }, + { + `module.foo.data.bar[0]`, + nil, + `Invalid address: A resource name is required.`, + }, + { + `module.foo.bar`, + nil, + `Invalid address: Resource specification must include a resource type and name.`, + }, + { + `module.foo.bar[0]`, + nil, + `Invalid address: A resource name is required.`, + }, + } + + for _, test := range tests { + t.Run(test.Input, func(t *testing.T) { + traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(test.Input), "", hcl.InitialPos) + if hclDiags.HasErrors() { + // We're not trying to test the HCL parser here, so any + // failures at this point are likely to be bugs in the + // test case itself. 
+ t.Fatalf("syntax error: %s", hclDiags.Error()) + } + + moveEp, diags := ParseMoveEndpoint(traversal) + + switch { + case test.WantErr != "": + if !diags.HasErrors() { + t.Fatalf("unexpected success\nwant error: %s", test.WantErr) + } + gotErr := diags.Err().Error() + if gotErr != test.WantErr { + t.Fatalf("wrong error\ngot: %s\nwant: %s", gotErr, test.WantErr) + } + default: + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err().Error()) + } + if diff := cmp.Diff(test.WantRel, moveEp.relSubject); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + } + }) + } +} + +func TestUnifyMoveEndpoints(t *testing.T) { + tests := []struct { + InputFrom, InputTo string + Module Module + WantFrom, WantTo string + }{ + { + InputFrom: `foo.bar`, + InputTo: `foo.baz`, + Module: RootModule, + WantFrom: `foo.bar[*]`, + WantTo: `foo.baz[*]`, + }, + { + InputFrom: `foo.bar`, + InputTo: `foo.baz`, + Module: RootModule.Child("a"), + WantFrom: `module.a[*].foo.bar[*]`, + WantTo: `module.a[*].foo.baz[*]`, + }, + { + InputFrom: `foo.bar`, + InputTo: `module.b[0].foo.baz`, + Module: RootModule.Child("a"), + WantFrom: `module.a[*].foo.bar[*]`, + WantTo: `module.a[*].module.b[0].foo.baz[*]`, + }, + { + InputFrom: `foo.bar`, + InputTo: `foo.bar["thing"]`, + Module: RootModule, + WantFrom: `foo.bar`, + WantTo: `foo.bar["thing"]`, + }, + { + InputFrom: `foo.bar["thing"]`, + InputTo: `foo.bar`, + Module: RootModule, + WantFrom: `foo.bar["thing"]`, + WantTo: `foo.bar`, + }, + { + InputFrom: `foo.bar["a"]`, + InputTo: `foo.bar["b"]`, + Module: RootModule, + WantFrom: `foo.bar["a"]`, + WantTo: `foo.bar["b"]`, + }, + { + InputFrom: `module.foo`, + InputTo: `module.bar`, + Module: RootModule, + WantFrom: `module.foo[*]`, + WantTo: `module.bar[*]`, + }, + { + InputFrom: `module.foo`, + InputTo: `module.bar.module.baz`, + Module: RootModule, + WantFrom: `module.foo[*]`, + WantTo: `module.bar.module.baz[*]`, + }, + { + InputFrom: `module.foo`, + InputTo: `module.bar.module.baz`, + 
Module: RootModule.Child("bloop"), + WantFrom: `module.bloop[*].module.foo[*]`, + WantTo: `module.bloop[*].module.bar.module.baz[*]`, + }, + { + InputFrom: `module.foo[0]`, + InputTo: `module.foo["a"]`, + Module: RootModule, + WantFrom: `module.foo[0]`, + WantTo: `module.foo["a"]`, + }, + { + InputFrom: `module.foo`, + InputTo: `module.foo["a"]`, + Module: RootModule, + WantFrom: `module.foo`, + WantTo: `module.foo["a"]`, + }, + { + InputFrom: `module.foo[0]`, + InputTo: `module.foo`, + Module: RootModule, + WantFrom: `module.foo[0]`, + WantTo: `module.foo`, + }, + { + InputFrom: `module.foo[0]`, + InputTo: `module.foo`, + Module: RootModule.Child("bloop"), + WantFrom: `module.bloop[*].module.foo[0]`, + WantTo: `module.bloop[*].module.foo`, + }, + { + InputFrom: `module.foo`, + InputTo: `foo.bar`, + Module: RootModule, + WantFrom: ``, // Can't unify module call with resource + WantTo: ``, + }, + { + InputFrom: `module.foo[0]`, + InputTo: `foo.bar`, + Module: RootModule, + WantFrom: ``, // Can't unify module instance with resource + WantTo: ``, + }, + { + InputFrom: `module.foo`, + InputTo: `foo.bar[0]`, + Module: RootModule, + WantFrom: ``, // Can't unify module call with resource instance + WantTo: ``, + }, + { + InputFrom: `module.foo[0]`, + InputTo: `foo.bar[0]`, + Module: RootModule, + WantFrom: ``, // Can't unify module instance with resource instance + WantTo: ``, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%s to %s in %s", test.InputFrom, test.InputTo, test.Module), func(t *testing.T) { + parseInput := func(input string) *MoveEndpoint { + t.Helper() + + traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(input), "", hcl.InitialPos) + if hclDiags.HasErrors() { + // We're not trying to test the HCL parser here, so any + // failures at this point are likely to be bugs in the + // test case itself. 
+ t.Fatalf("syntax error: %s", hclDiags.Error()) + } + + moveEp, diags := ParseMoveEndpoint(traversal) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err().Error()) + } + return moveEp + } + + fromEp := parseInput(test.InputFrom) + toEp := parseInput(test.InputTo) + + gotFrom, gotTo := UnifyMoveEndpoints(test.Module, fromEp, toEp) + if got, want := gotFrom.String(), test.WantFrom; got != want { + t.Errorf("wrong 'from' result\ngot: %s\nwant: %s", got, want) + } + if got, want := gotTo.String(), test.WantTo; got != want { + t.Errorf("wrong 'to' result\ngot: %s\nwant: %s", got, want) + } + }) + } +} + +func TestMoveEndpointConfigMoveable(t *testing.T) { + tests := []struct { + Input string + Module Module + Want ConfigMoveable + }{ + { + `foo.bar`, + RootModule, + ConfigResource{ + Module: RootModule, + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "foo", + Name: "bar", + }, + }, + }, + { + `foo.bar[0]`, + RootModule, + ConfigResource{ + Module: RootModule, + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "foo", + Name: "bar", + }, + }, + }, + { + `module.foo.bar.baz`, + RootModule, + ConfigResource{ + Module: Module{"foo"}, + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "bar", + Name: "baz", + }, + }, + }, + { + `module.foo[0].bar.baz`, + RootModule, + ConfigResource{ + Module: Module{"foo"}, + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "bar", + Name: "baz", + }, + }, + }, + { + `foo.bar`, + Module{"boop"}, + ConfigResource{ + Module: Module{"boop"}, + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "foo", + Name: "bar", + }, + }, + }, + { + `module.bloop.foo.bar`, + Module{"bleep"}, + ConfigResource{ + Module: Module{"bleep", "bloop"}, + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "foo", + Name: "bar", + }, + }, + }, + { + `module.foo.bar.baz`, + RootModule, + ConfigResource{ + Module: Module{"foo"}, + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "bar", + Name: 
"baz", + }, + }, + }, + { + `module.foo`, + RootModule, + Module{"foo"}, + }, + { + `module.foo[0]`, + RootModule, + Module{"foo"}, + }, + { + `module.bloop`, + Module{"bleep"}, + Module{"bleep", "bloop"}, + }, + { + `module.bloop[0]`, + Module{"bleep"}, + Module{"bleep", "bloop"}, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%s in %s", test.Input, test.Module), func(t *testing.T) { + traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(test.Input), "", hcl.InitialPos) + if hclDiags.HasErrors() { + // We're not trying to test the HCL parser here, so any + // failures at this point are likely to be bugs in the + // test case itself. + t.Fatalf("syntax error: %s", hclDiags.Error()) + } + + moveEp, diags := ParseMoveEndpoint(traversal) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err().Error()) + } + + got := moveEp.ConfigMoveable(test.Module) + if diff := cmp.Diff(test.Want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + } +} diff --git a/internal/terraform/addrs/moveable.go b/internal/terraform/addrs/moveable.go new file mode 100644 index 00000000..b9d2f87f --- /dev/null +++ b/internal/terraform/addrs/moveable.go @@ -0,0 +1,57 @@ +package addrs + +// AbsMoveable is an interface implemented by address types that can be either +// the source or destination of a "moved" statement in configuration, along +// with any other similar cross-module state refactoring statements we might +// allow. +// +// Note that AbsMoveable represents an absolute address relative to the root +// of the configuration, which is different than the direct representation +// of these in configuration where the author gives an address relative to +// the current module where the address is defined. 
The type MoveEndpoint +type AbsMoveable interface { + absMoveableSigil() + UniqueKeyer + + String() string +} + +// The following are all of the possible AbsMoveable address types: +var ( + _ AbsMoveable = AbsResource{} + _ AbsMoveable = AbsResourceInstance{} + _ AbsMoveable = ModuleInstance(nil) + _ AbsMoveable = AbsModuleCall{} +) + +// AbsMoveableResource is an AbsMoveable that is either a resource or a resource +// instance. +type AbsMoveableResource interface { + AbsMoveable + AffectedAbsResource() AbsResource +} + +// The following are all of the possible AbsMoveableResource types: +var ( + _ AbsMoveableResource = AbsResource{} + _ AbsMoveableResource = AbsResourceInstance{} +) + +// ConfigMoveable is similar to AbsMoveable but represents a static object in +// the configuration, rather than an instance of that object created by +// module expansion. +// +// Note that ConfigMovable represents an absolute address relative to the root +// of the configuration, which is different than the direct representation +// of these in configuration where the author gives an address relative to +// the current module where the address is defined. The type MoveEndpoint +// represents the relative form given directly in configuration. +type ConfigMoveable interface { + configMoveableSigil() +} + +// The following are all of the possible ConfigMovable address types: +var ( + _ ConfigMoveable = ConfigResource{} + _ ConfigMoveable = Module(nil) +) diff --git a/internal/terraform/addrs/moveendpointkind_string.go b/internal/terraform/addrs/moveendpointkind_string.go new file mode 100644 index 00000000..f706fb9c --- /dev/null +++ b/internal/terraform/addrs/moveendpointkind_string.go @@ -0,0 +1,29 @@ +// Code generated by "stringer -type MoveEndpointKind"; DO NOT EDIT. + +package addrs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[MoveEndpointModule-77] + _ = x[MoveEndpointResource-82] +} + +const ( + _MoveEndpointKind_name_0 = "MoveEndpointModule" + _MoveEndpointKind_name_1 = "MoveEndpointResource" +) + +func (i MoveEndpointKind) String() string { + switch { + case i == 77: + return _MoveEndpointKind_name_0 + case i == 82: + return _MoveEndpointKind_name_1 + default: + return "MoveEndpointKind(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/internal/terraform/addrs/output_value.go b/internal/terraform/addrs/output_value.go index d0dfca56..da164dd2 100644 --- a/internal/terraform/addrs/output_value.go +++ b/internal/terraform/addrs/output_value.go @@ -2,6 +2,10 @@ package addrs import ( "fmt" + + "github.com/camptocamp/terraboard/internal/terraform/tfdiags" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" ) // OutputValue is the address of an output value, in the context of the module @@ -34,6 +38,7 @@ func (v OutputValue) Absolute(m ModuleInstance) AbsOutputValue { // configuration. It is related to but separate from ModuleCallOutput, which // represents a module output from the perspective of its parent module. 
type AbsOutputValue struct { + checkable Module ModuleInstance OutputValue OutputValue } @@ -49,6 +54,14 @@ func (m ModuleInstance) OutputValue(name string) AbsOutputValue { } } +func (v AbsOutputValue) Check(t CheckType, i int) Check { + return Check{ + Container: v, + Type: t, + Index: i, + } +} + func (v AbsOutputValue) String() string { if v.Module.IsRoot() { return v.OutputValue.String() @@ -60,19 +73,81 @@ func (v AbsOutputValue) Equal(o AbsOutputValue) bool { return v.OutputValue == o.OutputValue && v.Module.Equal(o.Module) } +func ParseAbsOutputValue(traversal hcl.Traversal) (AbsOutputValue, tfdiags.Diagnostics) { + path, remain, diags := parseModuleInstancePrefix(traversal) + if diags.HasErrors() { + return AbsOutputValue{}, diags + } + + if len(remain) != 2 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "An output name is required.", + Subject: traversal.SourceRange().Ptr(), + }) + return AbsOutputValue{}, diags + } + + if remain.RootName() != "output" { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "Output address must start with \"output.\".", + Subject: remain[0].SourceRange().Ptr(), + }) + return AbsOutputValue{}, diags + } + + var name string + switch tt := remain[1].(type) { + case hcl.TraverseAttr: + name = tt.Name + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "An output name is required.", + Subject: remain[1].SourceRange().Ptr(), + }) + return AbsOutputValue{}, diags + } + + return AbsOutputValue{ + Module: path, + OutputValue: OutputValue{ + Name: name, + }, + }, diags +} + +func ParseAbsOutputValueStr(str string) (AbsOutputValue, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(parseDiags) + if parseDiags.HasErrors() { + 
return AbsOutputValue{}, diags + } + + addr, addrDiags := ParseAbsOutputValue(traversal) + diags = diags.Append(addrDiags) + return addr, diags +} + // ModuleCallOutput converts an AbsModuleOutput into a ModuleCallOutput, // returning also the module instance that the ModuleCallOutput is relative // to. // // The root module does not have a call, and so this method cannot be used // with outputs in the root module, and will panic in that case. -func (v AbsOutputValue) ModuleCallOutput() (ModuleInstance, AbsModuleCallOutput) { +func (v AbsOutputValue) ModuleCallOutput() (ModuleInstance, ModuleCallInstanceOutput) { if v.Module.IsRoot() { panic("ReferenceFromCall used with root module output") } caller, call := v.Module.CallInstance() - return caller, AbsModuleCallOutput{ + return caller, ModuleCallInstanceOutput{ Call: call, Name: v.OutputValue.Name, } diff --git a/internal/terraform/addrs/output_value_test.go b/internal/terraform/addrs/output_value_test.go index e56d4ca3..8e7bbeae 100644 --- a/internal/terraform/addrs/output_value_test.go +++ b/internal/terraform/addrs/output_value_test.go @@ -2,7 +2,10 @@ package addrs import ( "fmt" + "strings" "testing" + + "github.com/go-test/deep" ) func TestAbsOutputValueInstanceEqual_true(t *testing.T) { @@ -63,3 +66,66 @@ func TestAbsOutputValueInstanceEqual_false(t *testing.T) { }) } } + +func TestParseAbsOutputValueStr(t *testing.T) { + tests := map[string]struct { + want AbsOutputValue + wantErr string + }{ + "module.foo": { + wantErr: "An output name is required", + }, + "module.foo.output": { + wantErr: "An output name is required", + }, + "module.foo.boop.beep": { + wantErr: "Output address must start with \"output.\"", + }, + "module.foo.output[0]": { + wantErr: "An output name is required", + }, + "output": { + wantErr: "An output name is required", + }, + "output[0]": { + wantErr: "An output name is required", + }, + "output.boop": { + want: AbsOutputValue{ + Module: RootModuleInstance, + OutputValue: OutputValue{ + 
Name: "boop", + }, + }, + }, + "module.foo.output.beep": { + want: AbsOutputValue{ + Module: mustParseModuleInstanceStr("module.foo"), + OutputValue: OutputValue{ + Name: "beep", + }, + }, + }, + } + + for input, tc := range tests { + t.Run(input, func(t *testing.T) { + got, diags := ParseAbsOutputValueStr(input) + for _, problem := range deep.Equal(got, tc.want) { + t.Errorf(problem) + } + if len(diags) > 0 { + gotErr := diags.Err().Error() + if tc.wantErr == "" { + t.Errorf("got error, expected success: %s", gotErr) + } else if !strings.Contains(gotErr, tc.wantErr) { + t.Errorf("unexpected error\n got: %s\nwant: %s", gotErr, tc.wantErr) + } + } else { + if tc.wantErr != "" { + t.Errorf("got success, expected error: %s", tc.wantErr) + } + } + }) + } +} diff --git a/internal/terraform/addrs/parse_ref.go b/internal/terraform/addrs/parse_ref.go index 7ae9e7cf..b6df8083 100644 --- a/internal/terraform/addrs/parse_ref.go +++ b/internal/terraform/addrs/parse_ref.go @@ -2,10 +2,12 @@ package addrs import ( "fmt" + "strings" "github.com/camptocamp/terraboard/internal/terraform/tfdiags" "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" ) // Reference describes a reference to an address with source location @@ -16,6 +18,43 @@ type Reference struct { Remaining hcl.Traversal } +// DisplayString returns a string that approximates the subject and remaining +// traversal of the reciever in a way that resembles the Terraform language +// syntax that could've produced it. +// +// It's not guaranteed to actually be a valid Terraform language expression, +// since the intended use here is primarily for UI messages such as +// diagnostics. +func (r *Reference) DisplayString() string { + if len(r.Remaining) == 0 { + // Easy case: we can just return the subject's string. 
+ return r.Subject.String() + } + + var ret strings.Builder + ret.WriteString(r.Subject.String()) + for _, step := range r.Remaining { + switch tStep := step.(type) { + case hcl.TraverseRoot: + ret.WriteString(tStep.Name) + case hcl.TraverseAttr: + ret.WriteByte('.') + ret.WriteString(tStep.Name) + case hcl.TraverseIndex: + ret.WriteByte('[') + switch tStep.Key.Type() { + case cty.String: + ret.WriteString(fmt.Sprintf("%q", tStep.Key.AsString())) + case cty.Number: + bf := tStep.Key.AsBigFloat() + ret.WriteString(bf.Text('g', 10)) + } + ret.WriteByte(']') + } + } + return ret.String() +} + // ParseRef attempts to extract a referencable address from the prefix of the // given traversal, which must be an absolute traversal or this function // will panic. @@ -191,7 +230,7 @@ func parseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) { if attrTrav, ok := remain[0].(hcl.TraverseAttr); ok { remain = remain[1:] return &Reference{ - Subject: AbsModuleCallOutput{ + Subject: ModuleCallInstanceOutput{ Name: attrTrav.Name, Call: callInstance, }, diff --git a/internal/terraform/addrs/parse_ref_test.go b/internal/terraform/addrs/parse_ref_test.go index aef8545a..bb6953be 100644 --- a/internal/terraform/addrs/parse_ref_test.go +++ b/internal/terraform/addrs/parse_ref_test.go @@ -294,7 +294,7 @@ func TestParseRef(t *testing.T) { { `module.foo.bar`, &Reference{ - Subject: AbsModuleCallOutput{ + Subject: ModuleCallInstanceOutput{ Call: ModuleCallInstance{ Call: ModuleCall{ Name: "foo", @@ -312,7 +312,7 @@ func TestParseRef(t *testing.T) { { `module.foo.bar.baz`, &Reference{ - Subject: AbsModuleCallOutput{ + Subject: ModuleCallInstanceOutput{ Call: ModuleCallInstance{ Call: ModuleCall{ Name: "foo", @@ -355,7 +355,7 @@ func TestParseRef(t *testing.T) { { `module.foo["baz"].bar`, &Reference{ - Subject: AbsModuleCallOutput{ + Subject: ModuleCallInstanceOutput{ Call: ModuleCallInstance{ Call: ModuleCall{ Name: "foo", @@ -374,7 +374,7 @@ func TestParseRef(t *testing.T) { { 
`module.foo["baz"].bar.boop`, &Reference{ - Subject: AbsModuleCallOutput{ + Subject: ModuleCallInstanceOutput{ Call: ModuleCallInstance{ Call: ModuleCall{ Name: "foo", diff --git a/internal/terraform/addrs/parse_target.go b/internal/terraform/addrs/parse_target.go index 918a584f..e948577f 100644 --- a/internal/terraform/addrs/parse_target.go +++ b/internal/terraform/addrs/parse_target.go @@ -39,6 +39,36 @@ func ParseTarget(traversal hcl.Traversal) (*Target, tfdiags.Diagnostics) { }, diags } + riAddr, moreDiags := parseResourceInstanceUnderModule(path, remain) + diags = diags.Append(moreDiags) + if diags.HasErrors() { + return nil, diags + } + + var subject Targetable + switch { + case riAddr.Resource.Key == NoKey: + // We always assume that a no-key instance is meant to + // be referring to the whole resource, because the distinction + // doesn't really matter for targets anyway. + subject = riAddr.ContainingResource() + default: + subject = riAddr + } + + return &Target{ + Subject: subject, + SourceRange: rng, + }, diags +} + +func parseResourceInstanceUnderModule(moduleAddr ModuleInstance, remain hcl.Traversal) (AbsResourceInstance, tfdiags.Diagnostics) { + // Note that this helper is used as part of both ParseTarget and + // ParseMoveEndpoint, so its error messages should be generic + // enough to suit both situations. 
+ + var diags tfdiags.Diagnostics + mode := ManagedResourceMode if remain.RootName() == "data" { mode = DataResourceMode @@ -52,7 +82,7 @@ func ParseTarget(traversal hcl.Traversal) (*Target, tfdiags.Diagnostics) { Detail: "Resource specification must include a resource type and name.", Subject: remain.SourceRange().Ptr(), }) - return nil, diags + return AbsResourceInstance{}, diags } var typeName, name string @@ -80,7 +110,7 @@ func ParseTarget(traversal hcl.Traversal) (*Target, tfdiags.Diagnostics) { default: panic("unknown mode") } - return nil, diags + return AbsResourceInstance{}, diags } switch tt := remain[1].(type) { @@ -93,14 +123,13 @@ func ParseTarget(traversal hcl.Traversal) (*Target, tfdiags.Diagnostics) { Detail: "A resource name is required.", Subject: remain[1].SourceRange().Ptr(), }) - return nil, diags + return AbsResourceInstance{}, diags } - var subject Targetable remain = remain[2:] switch len(remain) { case 0: - subject = path.Resource(mode, typeName, name) + return moduleAddr.ResourceInstance(mode, typeName, name, NoKey), diags case 1: if tt, ok := remain[0].(hcl.TraverseIndex); ok { key, err := ParseInstanceKey(tt.Key) @@ -111,10 +140,10 @@ func ParseTarget(traversal hcl.Traversal) (*Target, tfdiags.Diagnostics) { Detail: fmt.Sprintf("Invalid resource instance key: %s.", err), Subject: remain[0].SourceRange().Ptr(), }) - return nil, diags + return AbsResourceInstance{}, diags } - subject = path.ResourceInstance(mode, typeName, name, key) + return moduleAddr.ResourceInstance(mode, typeName, name, key), diags } else { diags = diags.Append(&hcl.Diagnostic{ Severity: hcl.DiagError, @@ -122,7 +151,7 @@ func ParseTarget(traversal hcl.Traversal) (*Target, tfdiags.Diagnostics) { Detail: "Resource instance key must be given in square brackets.", Subject: remain[0].SourceRange().Ptr(), }) - return nil, diags + return AbsResourceInstance{}, diags } default: diags = diags.Append(&hcl.Diagnostic{ @@ -131,13 +160,8 @@ func ParseTarget(traversal 
hcl.Traversal) (*Target, tfdiags.Diagnostics) { Detail: "Unexpected extra operators after address.", Subject: remain[1].SourceRange().Ptr(), }) - return nil, diags + return AbsResourceInstance{}, diags } - - return &Target{ - Subject: subject, - SourceRange: rng, - }, diags } // ParseTargetStr is a helper wrapper around ParseTarget that takes a string @@ -316,3 +340,29 @@ func ParseAbsResourceInstanceStr(str string) (AbsResourceInstance, tfdiags.Diagn diags = diags.Append(addrDiags) return addr, diags } + +// ModuleAddr returns the module address portion of the subject of +// the recieving target. +// +// Regardless of specific address type, all targets always include +// a module address. They might also include something in that +// module, which this method always discards if so. +func (t *Target) ModuleAddr() ModuleInstance { + switch addr := t.Subject.(type) { + case ModuleInstance: + return addr + case Module: + // We assume that a module address is really + // referring to a module path containing only + // single-instance modules. + return addr.UnkeyedInstanceShim() + case AbsResourceInstance: + return addr.Module + case AbsResource: + return addr.Module + default: + // The above cases should be exhaustive for all + // implementations of Targetable. + panic(fmt.Sprintf("unsupported target address type %T", addr)) + } +} diff --git a/internal/terraform/addrs/path_attr.go b/internal/terraform/addrs/path_attr.go index cfc13f4b..9de5e134 100644 --- a/internal/terraform/addrs/path_attr.go +++ b/internal/terraform/addrs/path_attr.go @@ -10,3 +10,9 @@ type PathAttr struct { func (pa PathAttr) String() string { return "path." 
+ pa.Name } + +func (pa PathAttr) UniqueKey() UniqueKey { + return pa // A PathAttr is its own UniqueKey +} + +func (pa PathAttr) uniqueKeySigil() {} diff --git a/internal/terraform/addrs/provider.go b/internal/terraform/addrs/provider.go index 527e2b86..c0362eb5 100644 --- a/internal/terraform/addrs/provider.go +++ b/internal/terraform/addrs/provider.go @@ -19,9 +19,9 @@ type Provider struct { Hostname svchost.Hostname } -// DefaultRegistryHost is the hostname used for provider addresses that do +// DefaultProviderRegistryHost is the hostname used for provider addresses that do // not have an explicit hostname. -const DefaultRegistryHost = svchost.Hostname("registry.terraform.io") +const DefaultProviderRegistryHost = svchost.Hostname("registry.terraform.io") // BuiltInProviderHost is the pseudo-hostname used for the "built-in" provider // namespace. Built-in provider addresses must also have their namespace set @@ -59,7 +59,7 @@ func (pt Provider) ForDisplay() string { panic("called ForDisplay on zero-value addrs.Provider") } - if pt.Hostname == DefaultRegistryHost { + if pt.Hostname == DefaultProviderRegistryHost { return pt.Namespace + "/" + pt.Type } return pt.Hostname.ForDisplay() + "/" + pt.Namespace + "/" + pt.Type @@ -121,7 +121,7 @@ func NewDefaultProvider(name string) Provider { return Provider{ Type: MustParseProviderPart(name), Namespace: "hashicorp", - Hostname: DefaultRegistryHost, + Hostname: DefaultProviderRegistryHost, } } @@ -144,7 +144,7 @@ func NewLegacyProvider(name string) Provider { // verbatim, even if not compliant with our new naming rules. 
Type: name, Namespace: LegacyProviderNamespace, - Hostname: DefaultRegistryHost, + Hostname: DefaultProviderRegistryHost, } } @@ -204,7 +204,7 @@ func (pt Provider) IsLegacy() bool { panic("called IsLegacy() on zero-value addrs.Provider") } - return pt.Hostname == DefaultRegistryHost && pt.Namespace == LegacyProviderNamespace + return pt.Hostname == DefaultProviderRegistryHost && pt.Namespace == LegacyProviderNamespace } @@ -214,7 +214,7 @@ func (pt Provider) IsDefault() bool { panic("called IsDefault() on zero-value addrs.Provider") } - return pt.Hostname == DefaultRegistryHost && pt.Namespace == "hashicorp" + return pt.Hostname == DefaultProviderRegistryHost && pt.Namespace == "hashicorp" } // Equals returns true if the receiver and other provider have the same attributes. @@ -269,7 +269,7 @@ func ParseProviderSourceString(str string) (Provider, tfdiags.Diagnostics) { return ret, diags } ret.Type = name - ret.Hostname = DefaultRegistryHost + ret.Hostname = DefaultProviderRegistryHost if len(parts) == 1 { return NewDefaultProvider(parts[0]), diags @@ -312,14 +312,14 @@ func ParseProviderSourceString(str string) (Provider, tfdiags.Diagnostics) { ret.Hostname = hn } - if ret.Namespace == LegacyProviderNamespace && ret.Hostname != DefaultRegistryHost { + if ret.Namespace == LegacyProviderNamespace && ret.Hostname != DefaultProviderRegistryHost { // Legacy provider addresses must always be on the default registry // host, because the default registry host decides what actual FQN // each one maps to. 
diags = diags.Append(&hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Invalid provider namespace", - Detail: "The legacy provider namespace \"-\" can be used only with hostname " + DefaultRegistryHost.ForDisplay() + ".", + Detail: "The legacy provider namespace \"-\" can be used only with hostname " + DefaultProviderRegistryHost.ForDisplay() + ".", }) return Provider{}, diags } diff --git a/internal/terraform/addrs/provider_test.go b/internal/terraform/addrs/provider_test.go index 0e02a61f..40361058 100644 --- a/internal/terraform/addrs/provider_test.go +++ b/internal/terraform/addrs/provider_test.go @@ -15,7 +15,7 @@ func TestProviderString(t *testing.T) { { Provider{ Type: "test", - Hostname: DefaultRegistryHost, + Hostname: DefaultProviderRegistryHost, Namespace: "hashicorp", }, NewDefaultProvider("test").String(), @@ -23,7 +23,7 @@ func TestProviderString(t *testing.T) { { Provider{ Type: "test-beta", - Hostname: DefaultRegistryHost, + Hostname: DefaultProviderRegistryHost, Namespace: "hashicorp", }, NewDefaultProvider("test-beta").String(), @@ -39,10 +39,10 @@ func TestProviderString(t *testing.T) { { Provider{ Type: "test", - Hostname: DefaultRegistryHost, + Hostname: DefaultProviderRegistryHost, Namespace: "othercorp", }, - DefaultRegistryHost.ForDisplay() + "/othercorp/test", + DefaultProviderRegistryHost.ForDisplay() + "/othercorp/test", }, } @@ -62,7 +62,7 @@ func TestProviderLegacyString(t *testing.T) { { Provider{ Type: "test", - Hostname: DefaultRegistryHost, + Hostname: DefaultProviderRegistryHost, Namespace: LegacyProviderNamespace, }, "test", @@ -93,7 +93,7 @@ func TestProviderDisplay(t *testing.T) { { Provider{ Type: "test", - Hostname: DefaultRegistryHost, + Hostname: DefaultProviderRegistryHost, Namespace: "hashicorp", }, "hashicorp/test", @@ -109,7 +109,7 @@ func TestProviderDisplay(t *testing.T) { { Provider{ Type: "test", - Hostname: DefaultRegistryHost, + Hostname: DefaultProviderRegistryHost, Namespace: "othercorp", }, "othercorp/test", 
@@ -132,7 +132,7 @@ func TestProviderIsDefault(t *testing.T) { { Provider{ Type: "test", - Hostname: DefaultRegistryHost, + Hostname: DefaultProviderRegistryHost, Namespace: "hashicorp", }, true, @@ -148,7 +148,7 @@ func TestProviderIsDefault(t *testing.T) { { Provider{ Type: "test", - Hostname: DefaultRegistryHost, + Hostname: DefaultProviderRegistryHost, Namespace: "othercorp", }, false, @@ -195,7 +195,7 @@ func TestProviderIsBuiltIn(t *testing.T) { { Provider{ Type: "test", - Hostname: DefaultRegistryHost, + Hostname: DefaultProviderRegistryHost, Namespace: BuiltInProviderNamespace, }, false, @@ -203,7 +203,7 @@ func TestProviderIsBuiltIn(t *testing.T) { { Provider{ Type: "test", - Hostname: DefaultRegistryHost, + Hostname: DefaultProviderRegistryHost, Namespace: "hashicorp", }, false, @@ -219,7 +219,7 @@ func TestProviderIsBuiltIn(t *testing.T) { { Provider{ Type: "test", - Hostname: DefaultRegistryHost, + Hostname: DefaultProviderRegistryHost, Namespace: "othercorp", }, false, @@ -242,7 +242,7 @@ func TestProviderIsLegacy(t *testing.T) { { Provider{ Type: "test", - Hostname: DefaultRegistryHost, + Hostname: DefaultProviderRegistryHost, Namespace: LegacyProviderNamespace, }, true, @@ -258,7 +258,7 @@ func TestProviderIsLegacy(t *testing.T) { { Provider{ Type: "test", - Hostname: DefaultRegistryHost, + Hostname: DefaultProviderRegistryHost, Namespace: "hashicorp", }, false, @@ -282,7 +282,7 @@ func TestParseProviderSourceStr(t *testing.T) { Provider{ Type: "aws", Namespace: "hashicorp", - Hostname: DefaultRegistryHost, + Hostname: DefaultProviderRegistryHost, }, false, }, @@ -290,7 +290,7 @@ func TestParseProviderSourceStr(t *testing.T) { Provider{ Type: "aws", Namespace: "hashicorp", - Hostname: DefaultRegistryHost, + Hostname: DefaultProviderRegistryHost, }, false, }, @@ -298,7 +298,7 @@ func TestParseProviderSourceStr(t *testing.T) { Provider{ Type: "aws", Namespace: "hashicorp", - Hostname: DefaultRegistryHost, + Hostname: DefaultProviderRegistryHost, }, 
false, }, @@ -306,7 +306,7 @@ func TestParseProviderSourceStr(t *testing.T) { Provider{ Type: "aws", Namespace: "hashicorp", - Hostname: DefaultRegistryHost, + Hostname: DefaultProviderRegistryHost, }, false, }, @@ -314,7 +314,7 @@ func TestParseProviderSourceStr(t *testing.T) { Provider{ Type: "aws", Namespace: "hashicorp", - Hostname: DefaultRegistryHost, + Hostname: DefaultProviderRegistryHost, }, false, }, @@ -322,7 +322,7 @@ func TestParseProviderSourceStr(t *testing.T) { Provider{ Type: "aws", Namespace: "hashicorp", - Hostname: DefaultRegistryHost, + Hostname: DefaultProviderRegistryHost, }, false, }, @@ -338,7 +338,7 @@ func TestParseProviderSourceStr(t *testing.T) { Provider{ Type: "baz-boop", Namespace: "foo-bar", - Hostname: DefaultRegistryHost, + Hostname: DefaultProviderRegistryHost, }, false, }, @@ -529,22 +529,22 @@ func TestProviderEquals(t *testing.T) { Want bool }{ { - NewProvider(DefaultRegistryHost, "foo", "test"), - NewProvider(DefaultRegistryHost, "foo", "test"), + NewProvider(DefaultProviderRegistryHost, "foo", "test"), + NewProvider(DefaultProviderRegistryHost, "foo", "test"), true, }, { - NewProvider(DefaultRegistryHost, "foo", "test"), - NewProvider(DefaultRegistryHost, "bar", "test"), + NewProvider(DefaultProviderRegistryHost, "foo", "test"), + NewProvider(DefaultProviderRegistryHost, "bar", "test"), false, }, { - NewProvider(DefaultRegistryHost, "foo", "test"), - NewProvider(DefaultRegistryHost, "foo", "my-test"), + NewProvider(DefaultProviderRegistryHost, "foo", "test"), + NewProvider(DefaultProviderRegistryHost, "foo", "my-test"), false, }, { - NewProvider(DefaultRegistryHost, "foo", "test"), + NewProvider(DefaultProviderRegistryHost, "foo", "test"), NewProvider("example.com", "foo", "test"), false, }, diff --git a/internal/terraform/addrs/referenceable.go b/internal/terraform/addrs/referenceable.go index 211083a5..fbbc753d 100644 --- a/internal/terraform/addrs/referenceable.go +++ b/internal/terraform/addrs/referenceable.go @@ -7,6 
+7,9 @@ type Referenceable interface { // in lang.Scope.buildEvalContext. referenceableSigil() + // All Referenceable address types must have unique keys. + UniqueKeyer + // String produces a string representation of the address that could be // parsed as a HCL traversal and passed to ParseRef to produce an identical // result. diff --git a/internal/terraform/addrs/resource.go b/internal/terraform/addrs/resource.go index 8aa0efc0..75b8a222 100644 --- a/internal/terraform/addrs/resource.go +++ b/internal/terraform/addrs/resource.go @@ -32,6 +32,12 @@ func (r Resource) Equal(o Resource) bool { return r.Mode == o.Mode && r.Name == o.Name && r.Type == o.Type } +func (r Resource) UniqueKey() UniqueKey { + return r // A Resource is its own UniqueKey +} + +func (r Resource) uniqueKeySigil() {} + // Instance produces the address for a specific instance of the receiver // that is idenfied by the given key. func (r Resource) Instance(key InstanceKey) ResourceInstance { @@ -94,6 +100,12 @@ func (r ResourceInstance) Equal(o ResourceInstance) bool { return r.Key == o.Key && r.Resource.Equal(o.Resource) } +func (r ResourceInstance) UniqueKey() UniqueKey { + return r // A ResourceInstance is its own UniqueKey +} + +func (r ResourceInstance) uniqueKeySigil() {} + // Absolute returns an AbsResourceInstance from the receiver and the given module // instance address. func (r ResourceInstance) Absolute(module ModuleInstance) AbsResourceInstance { @@ -163,6 +175,10 @@ func (r AbsResource) TargetContains(other Targetable) bool { } } +func (r AbsResource) AddrType() TargetableAddrType { + return AbsResourceAddrType +} + func (r AbsResource) String() string { if len(r.Module) == 0 { return r.Resource.String() @@ -170,13 +186,31 @@ func (r AbsResource) String() string { return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String()) } +// AffectedAbsResource returns the AbsResource. 
+func (r AbsResource) AffectedAbsResource() AbsResource { + return r +} + func (r AbsResource) Equal(o AbsResource) bool { return r.Module.Equal(o.Module) && r.Resource.Equal(o.Resource) } +func (r AbsResource) absMoveableSigil() { + // AbsResource is moveable +} + +type absResourceKey string + +func (r absResourceKey) uniqueKeySigil() {} + +func (r AbsResource) UniqueKey() UniqueKey { + return absResourceKey(r.String()) +} + // AbsResourceInstance is an absolute address for a resource instance under a // given module path. type AbsResourceInstance struct { + checkable targetable Module ModuleInstance Resource ResourceInstance @@ -228,6 +262,10 @@ func (r AbsResourceInstance) TargetContains(other Targetable) bool { } } +func (r AbsResourceInstance) AddrType() TargetableAddrType { + return AbsResourceInstanceAddrType +} + func (r AbsResourceInstance) String() string { if len(r.Module) == 0 { return r.Resource.String() @@ -235,6 +273,22 @@ func (r AbsResourceInstance) String() string { return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String()) } +// AffectedAbsResource returns the AbsResource for the instance. +func (r AbsResourceInstance) AffectedAbsResource() AbsResource { + return AbsResource{ + Module: r.Module, + Resource: r.Resource.Resource, + } +} + +func (r AbsResourceInstance) Check(t CheckType, i int) Check { + return Check{ + Container: r, + Type: t, + Index: i, + } +} + func (r AbsResourceInstance) Equal(o AbsResourceInstance) bool { return r.Module.Equal(o.Module) && r.Resource.Equal(o.Resource) } @@ -268,6 +322,18 @@ func (r AbsResourceInstance) Less(o AbsResourceInstance) bool { } } +type absResourceInstanceKey string + +func (r AbsResourceInstance) UniqueKey() UniqueKey { + return absResourceInstanceKey(r.String()) +} + +func (r absResourceInstanceKey) uniqueKeySigil() {} + +func (r AbsResourceInstance) absMoveableSigil() { + // AbsResourceInstance is moveable +} + // ConfigResource is an address for a resource within a configuration. 
type ConfigResource struct { targetable @@ -312,6 +378,10 @@ func (r ConfigResource) TargetContains(other Targetable) bool { } } +func (r ConfigResource) AddrType() TargetableAddrType { + return ConfigResourceAddrType +} + func (r ConfigResource) String() string { if len(r.Module) == 0 { return r.Resource.String() @@ -323,6 +393,10 @@ func (r ConfigResource) Equal(o ConfigResource) bool { return r.Module.Equal(o.Module) && r.Resource.Equal(o.Resource) } +func (r ConfigResource) configMoveableSigil() { + // AbsResource is moveable +} + // ResourceMode defines which lifecycle applies to a given resource. Each // resource lifecycle has a slightly different address format. type ResourceMode rune diff --git a/internal/terraform/addrs/resource_phase.go b/internal/terraform/addrs/resource_phase.go index 9bdbdc42..c62a7fc8 100644 --- a/internal/terraform/addrs/resource_phase.go +++ b/internal/terraform/addrs/resource_phase.go @@ -44,6 +44,12 @@ func (rp ResourceInstancePhase) String() string { return fmt.Sprintf("%s#%s", rp.ResourceInstance, rp.Phase) } +func (rp ResourceInstancePhase) UniqueKey() UniqueKey { + return rp // A ResourceInstancePhase is its own UniqueKey +} + +func (rp ResourceInstancePhase) uniqueKeySigil() {} + // ResourceInstancePhaseType is an enumeration used with ResourceInstancePhase. type ResourceInstancePhaseType string @@ -103,3 +109,9 @@ func (rp ResourcePhase) String() string { // because this special address type should never be exposed in the UI. 
return fmt.Sprintf("%s#%s", rp.Resource, rp.Phase) } + +func (rp ResourcePhase) UniqueKey() UniqueKey { + return rp // A ResourcePhase is its own UniqueKey +} + +func (rp ResourcePhase) uniqueKeySigil() {} diff --git a/internal/terraform/addrs/resource_test.go b/internal/terraform/addrs/resource_test.go index fbaa981f..d68d2b5d 100644 --- a/internal/terraform/addrs/resource_test.go +++ b/internal/terraform/addrs/resource_test.go @@ -215,6 +215,77 @@ func TestAbsResourceInstanceEqual_false(t *testing.T) { } } +func TestAbsResourceUniqueKey(t *testing.T) { + resourceAddr1 := Resource{ + Mode: ManagedResourceMode, + Type: "a", + Name: "b1", + }.Absolute(RootModuleInstance) + resourceAddr2 := Resource{ + Mode: ManagedResourceMode, + Type: "a", + Name: "b2", + }.Absolute(RootModuleInstance) + resourceAddr3 := Resource{ + Mode: ManagedResourceMode, + Type: "a", + Name: "in_module", + }.Absolute(RootModuleInstance.Child("boop", NoKey)) + + tests := []struct { + Reciever AbsResource + Other UniqueKeyer + WantEqual bool + }{ + { + resourceAddr1, + resourceAddr1, + true, + }, + { + resourceAddr1, + resourceAddr2, + false, + }, + { + resourceAddr1, + resourceAddr3, + false, + }, + { + resourceAddr3, + resourceAddr3, + true, + }, + { + resourceAddr1, + resourceAddr1.Instance(NoKey), + false, // no-key instance key is distinct from its resource even though they have the same String result + }, + { + resourceAddr1, + resourceAddr1.Instance(IntKey(1)), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%s matches %T %s?", test.Reciever, test.Other, test.Other), func(t *testing.T) { + rKey := test.Reciever.UniqueKey() + oKey := test.Other.UniqueKey() + + gotEqual := rKey == oKey + if gotEqual != test.WantEqual { + t.Errorf( + "wrong result\nreceiver: %s\nother: %s (%T)\ngot: %t\nwant: %t", + test.Reciever, test.Other, test.Other, + gotEqual, test.WantEqual, + ) + } + }) + } +} + func TestConfigResourceEqual_true(t *testing.T) { resources := []ConfigResource{ { 
diff --git a/internal/terraform/addrs/self.go b/internal/terraform/addrs/self.go index 7f24eaf0..64c8f6ec 100644 --- a/internal/terraform/addrs/self.go +++ b/internal/terraform/addrs/self.go @@ -12,3 +12,9 @@ func (s selfT) referenceableSigil() { func (s selfT) String() string { return "self" } + +func (s selfT) UniqueKey() UniqueKey { + return Self // Self is its own UniqueKey +} + +func (s selfT) uniqueKeySigil() {} diff --git a/internal/terraform/addrs/set.go b/internal/terraform/addrs/set.go new file mode 100644 index 00000000..ef82c591 --- /dev/null +++ b/internal/terraform/addrs/set.go @@ -0,0 +1,43 @@ +package addrs + +// Set represents a set of addresses of types that implement UniqueKeyer. +type Set map[UniqueKey]UniqueKeyer + +func (s Set) Has(addr UniqueKeyer) bool { + _, exists := s[addr.UniqueKey()] + return exists +} + +func (s Set) Add(addr UniqueKeyer) { + s[addr.UniqueKey()] = addr +} + +func (s Set) Remove(addr UniqueKeyer) { + delete(s, addr.UniqueKey()) +} + +func (s Set) Union(other Set) Set { + ret := make(Set) + for k, addr := range s { + ret[k] = addr + } + for k, addr := range other { + ret[k] = addr + } + return ret +} + +func (s Set) Intersection(other Set) Set { + ret := make(Set) + for k, addr := range s { + if _, exists := other[k]; exists { + ret[k] = addr + } + } + for k, addr := range other { + if _, exists := s[k]; exists { + ret[k] = addr + } + } + return ret +} diff --git a/internal/terraform/addrs/targetable.go b/internal/terraform/addrs/targetable.go index 16819a5a..1aa3ef1f 100644 --- a/internal/terraform/addrs/targetable.go +++ b/internal/terraform/addrs/targetable.go @@ -13,6 +13,10 @@ type Targetable interface { // A targetable address always contains at least itself. TargetContains(other Targetable) bool + // AddrType returns the address type for comparison with other Targetable + // addresses. 
+ AddrType() TargetableAddrType + // String produces a string representation of the address that could be // parsed as a HCL traversal and passed to ParseTarget to produce an // identical result. @@ -24,3 +28,13 @@ type targetable struct { func (r targetable) targetableSigil() { } + +type TargetableAddrType int + +const ( + ConfigResourceAddrType TargetableAddrType = iota + AbsResourceInstanceAddrType + AbsResourceAddrType + ModuleAddrType + ModuleInstanceAddrType +) diff --git a/internal/terraform/addrs/terraform_attr.go b/internal/terraform/addrs/terraform_attr.go index a880182a..d3d11677 100644 --- a/internal/terraform/addrs/terraform_attr.go +++ b/internal/terraform/addrs/terraform_attr.go @@ -10,3 +10,9 @@ type TerraformAttr struct { func (ta TerraformAttr) String() string { return "terraform." + ta.Name } + +func (ta TerraformAttr) UniqueKey() UniqueKey { + return ta // A TerraformAttr is its own UniqueKey +} + +func (ta TerraformAttr) uniqueKeySigil() {} diff --git a/internal/terraform/addrs/unique_key.go b/internal/terraform/addrs/unique_key.go new file mode 100644 index 00000000..c3321a29 --- /dev/null +++ b/internal/terraform/addrs/unique_key.go @@ -0,0 +1,23 @@ +package addrs + +// UniqueKey is an interface implemented by values that serve as unique map +// keys for particular addresses. +// +// All implementations of UniqueKey are comparable and can thus be used as +// map keys. Unique keys generated from different address types are always +// distinct. All functionally-equivalent keys for the same address type +// always compare equal, and likewise functionally-different values do not. +type UniqueKey interface { + uniqueKeySigil() +} + +// UniqueKeyer is an interface implemented by types that can be represented +// by a unique key. +// +// Some address types naturally comply with the expectations of a UniqueKey +// and may thus be their own unique key type. 
However, address types that +// are not naturally comparable can implement this interface by returning +// proxy values. +type UniqueKeyer interface { + UniqueKey() UniqueKey +} diff --git a/internal/terraform/addrs/unique_key_test.go b/internal/terraform/addrs/unique_key_test.go new file mode 100644 index 00000000..0926a0c3 --- /dev/null +++ b/internal/terraform/addrs/unique_key_test.go @@ -0,0 +1,72 @@ +package addrs + +import ( + "fmt" + "testing" +) + +// TestUniqueKeyer aims to ensure that all of the types that have unique keys +// will continue to meet the UniqueKeyer contract under future changes. +// +// If you add a new implementation of UniqueKey, consider adding a test case +// for it here. +func TestUniqueKeyer(t *testing.T) { + tests := []UniqueKeyer{ + CountAttr{Name: "index"}, + ForEachAttr{Name: "key"}, + TerraformAttr{Name: "workspace"}, + PathAttr{Name: "module"}, + InputVariable{Name: "foo"}, + ModuleCall{Name: "foo"}, + ModuleCallInstance{ + Call: ModuleCall{Name: "foo"}, + Key: StringKey("a"), + }, + ModuleCallOutput{ + Call: ModuleCall{Name: "foo"}, + Name: "bar", + }, + ModuleCallInstanceOutput{ + Call: ModuleCallInstance{ + Call: ModuleCall{Name: "foo"}, + Key: StringKey("a"), + }, + Name: "bar", + }, + Resource{ + Mode: ManagedResourceMode, + Type: "foo", + Name: "bar", + }, + ResourceInstance{ + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "foo", + Name: "bar", + }, + Key: IntKey(1), + }, + RootModuleInstance, + RootModuleInstance.Child("foo", NoKey), + RootModuleInstance.ResourceInstance( + DataResourceMode, + "boop", + "beep", + NoKey, + ), + Self, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%s", test), func(t *testing.T) { + a := test.UniqueKey() + b := test.UniqueKey() + + // The following comparison will panic if the unique key is not + // of a comparable type. 
+ if a != b { + t.Fatalf("the two unique keys are not equal\na: %#v\b: %#v", a, b) + } + }) + } +} diff --git a/internal/terraform/configs/checks.go b/internal/terraform/configs/checks.go new file mode 100644 index 00000000..2de35c2d --- /dev/null +++ b/internal/terraform/configs/checks.go @@ -0,0 +1,132 @@ +package configs + +import ( + "fmt" + + "github.com/camptocamp/terraboard/internal/terraform/addrs" + "github.com/camptocamp/terraboard/internal/terraform/lang" + "github.com/hashicorp/hcl/v2" +) + +// CheckRule represents a configuration-defined validation rule, precondition, +// or postcondition. Blocks of this sort can appear in a few different places +// in configuration, including "validation" blocks for variables, +// and "precondition" and "postcondition" blocks for resources. +type CheckRule struct { + // Condition is an expression that must evaluate to true if the condition + // holds or false if it does not. If the expression produces an error then + // that's considered to be a bug in the module defining the check. + // + // The available variables in a condition expression vary depending on what + // a check is attached to. For example, validation rules attached to + // input variables can only refer to the variable that is being validated. + Condition hcl.Expression + + // ErrorMessage should be one or more full sentences, which should be in + // English for consistency with the rest of the error message output but + // can in practice be in any language. The message should describe what is + // required for the condition to return true in a way that would make sense + // to a caller of the module. + // + // The error message expression has the same variables available for + // interpolation as the corresponding condition. 
+ ErrorMessage hcl.Expression + + DeclRange hcl.Range +} + +// validateSelfReferences looks for references in the check rule matching the +// specified resource address, returning error diagnostics if such a reference +// is found. +func (cr *CheckRule) validateSelfReferences(checkType string, addr addrs.Resource) hcl.Diagnostics { + var diags hcl.Diagnostics + refs, _ := lang.References(cr.Condition.Variables()) + for _, ref := range refs { + var refAddr addrs.Resource + + switch rs := ref.Subject.(type) { + case addrs.Resource: + refAddr = rs + case addrs.ResourceInstance: + refAddr = rs.Resource + default: + continue + } + + if refAddr.Equal(addr) { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Invalid reference in %s", checkType), + Detail: fmt.Sprintf("Configuration for %s may not refer to itself.", addr.String()), + Subject: cr.Condition.Range().Ptr(), + }) + break + } + } + return diags +} + +// decodeCheckRuleBlock decodes the contents of the given block as a check rule. +// +// Unlike most of our "decode..." functions, this one can be applied to blocks +// of various types as long as their body structures are "check-shaped". The +// function takes the containing block only because some error messages will +// refer to its location, and the returned object's DeclRange will be the +// block's header. +func decodeCheckRuleBlock(block *hcl.Block, override bool) (*CheckRule, hcl.Diagnostics) { + var diags hcl.Diagnostics + cr := &CheckRule{ + DeclRange: block.DefRange, + } + + if override { + // For now we'll just forbid overriding check blocks, to simplify + // the initial design. If we can find a clear use-case for overriding + // checks in override files and there's a way to define it that + // isn't confusing then we could relax this. 
+ diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Can't override %s blocks", block.Type), + Detail: fmt.Sprintf("Override files cannot override %q blocks.", block.Type), + Subject: cr.DeclRange.Ptr(), + }) + return cr, diags + } + + content, moreDiags := block.Body.Content(checkRuleBlockSchema) + diags = append(diags, moreDiags...) + + if attr, exists := content.Attributes["condition"]; exists { + cr.Condition = attr.Expr + + if len(cr.Condition.Variables()) == 0 { + // A condition expression that doesn't refer to any variable is + // pointless, because its result would always be a constant. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Invalid %s expression", block.Type), + Detail: "The condition expression must refer to at least one object from elsewhere in the configuration, or else its result would not be checking anything.", + Subject: cr.Condition.Range().Ptr(), + }) + } + } + + if attr, exists := content.Attributes["error_message"]; exists { + cr.ErrorMessage = attr.Expr + } + + return cr, diags +} + +var checkRuleBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "condition", + Required: true, + }, + { + Name: "error_message", + Required: true, + }, + }, +} diff --git a/internal/terraform/configs/cloud.go b/internal/terraform/configs/cloud.go new file mode 100644 index 00000000..1ed6482e --- /dev/null +++ b/internal/terraform/configs/cloud.go @@ -0,0 +1,27 @@ +package configs + +import ( + "github.com/hashicorp/hcl/v2" +) + +// Cloud represents a "cloud" block inside a "terraform" block in a module +// or file. 
+type CloudConfig struct { + Config hcl.Body + + DeclRange hcl.Range +} + +func decodeCloudBlock(block *hcl.Block) (*CloudConfig, hcl.Diagnostics) { + return &CloudConfig{ + Config: block.Body, + DeclRange: block.DefRange, + }, nil +} + +func (c *CloudConfig) ToBackendConfig() Backend { + return Backend{ + Type: "cloud", + Config: c.Config, + } +} diff --git a/internal/terraform/configs/config.go b/internal/terraform/configs/config.go index 2cac1c6c..7f4ecf3d 100644 --- a/internal/terraform/configs/config.go +++ b/internal/terraform/configs/config.go @@ -2,9 +2,11 @@ package configs import ( "fmt" + "log" "sort" "github.com/camptocamp/terraboard/internal/terraform/addrs" + "github.com/camptocamp/terraboard/internal/terraform/depsfile" "github.com/camptocamp/terraboard/internal/terraform/getproviders" version "github.com/hashicorp/go-version" "github.com/hashicorp/hcl/v2" @@ -55,10 +57,12 @@ type Config struct { CallRange hcl.Range // SourceAddr is the source address that the referenced module was requested - // from, as specified in configuration. + // from, as specified in configuration. SourceAddrRaw is the same + // information, but as the raw string the user originally entered. // - // This field is meaningless for the root module, where its contents are undefined. - SourceAddr string + // These fields are meaningless for the root module, where their contents are undefined. + SourceAddr addrs.ModuleSource + SourceAddrRaw string // SourceAddrRange is the location in the configuration source where the // SourceAddr value was set, for use in diagnostic messages. @@ -82,7 +86,7 @@ type Config struct { // determine which modules require which providers. 
type ModuleRequirements struct { Name string - SourceAddr string + SourceAddr addrs.ModuleSource SourceDir string Requirements getproviders.Requirements Children map[string]*ModuleRequirements @@ -175,6 +179,106 @@ func (c *Config) DescendentForInstance(path addrs.ModuleInstance) *Config { return current } +// EntersNewPackage returns true if this call is to an external module, either +// directly via a remote source address or indirectly via a registry source +// address. +// +// Other behaviors in Terraform may treat package crossings as a special +// situation, because that indicates that the caller and callee can change +// independently of one another and thus we should disallow using any features +// where the caller assumes anything about the callee other than its input +// variables, required provider configurations, and output values. +// +// It's not meaningful to ask if the Config representing the root module enters +// a new package because the root module is always outside of all module +// packages, and so this function will arbitrarily return false in that case. +func (c *Config) EntersNewPackage() bool { + return moduleSourceAddrEntersNewPackage(c.SourceAddr) +} + +// VerifyDependencySelections checks whether the given locked dependencies +// are acceptable for all of the version constraints reported in the +// configuration tree represented by the reciever. +// +// This function will errors only if any of the locked dependencies are out of +// range for corresponding constraints in the configuration. If there are +// multiple inconsistencies then it will attempt to describe as many of them +// as possible, rather than stopping at the first problem. 
+// +// It's typically the responsibility of "terraform init" to change the locked +// dependencies to conform with the configuration, and so +// VerifyDependencySelections is intended for other commands to check whether +// it did so correctly and to catch if anything has changed in configuration +// since the last "terraform init" which requires re-initialization. However, +// it's up to the caller to decide how to advise users recover from these +// errors, because the advise can vary depending on what operation the user +// is attempting. +func (c *Config) VerifyDependencySelections(depLocks *depsfile.Locks) []error { + var errs []error + + reqs, diags := c.ProviderRequirements() + if diags.HasErrors() { + // It should be very unusual to get here, but unfortunately we can + // end up here in some edge cases where the config loader doesn't + // process version constraint strings in exactly the same way as + // the requirements resolver. (See the addProviderRequirements method + // for more information.) + errs = append(errs, fmt.Errorf("failed to determine the configuration's provider requirements: %s", diags.Error())) + } + + for providerAddr, constraints := range reqs { + if !depsfile.ProviderIsLockable(providerAddr) { + continue // disregard builtin providers, and such + } + if depLocks != nil && depLocks.ProviderIsOverridden(providerAddr) { + // The "overridden" case is for unusual special situations like + // dev overrides, so we'll explicitly note it in the logs just in + // case we see bug reports with these active and it helps us + // understand why we ended up using the "wrong" plugin. 
+ log.Printf("[DEBUG] Config.VerifyDependencySelections: skipping %s because it's overridden by a special configuration setting", providerAddr) + continue + } + + var lock *depsfile.ProviderLock + if depLocks != nil { // Should always be true in main code, but unfortunately sometimes not true in old tests that don't fill out arguments completely + lock = depLocks.Provider(providerAddr) + } + if lock == nil { + log.Printf("[TRACE] Config.VerifyDependencySelections: provider %s has no lock file entry to satisfy %q", providerAddr, getproviders.VersionConstraintsString(constraints)) + errs = append(errs, fmt.Errorf("provider %s: required by this configuration but no version is selected", providerAddr)) + continue + } + + selectedVersion := lock.Version() + allowedVersions := getproviders.MeetingConstraints(constraints) + log.Printf("[TRACE] Config.VerifyDependencySelections: provider %s has %s to satisfy %q", providerAddr, selectedVersion.String(), getproviders.VersionConstraintsString(constraints)) + if !allowedVersions.Has(selectedVersion) { + // The most likely cause of this is that the author of a module + // has changed its constraints, but this could also happen in + // some other unusual situations, such as the user directly + // editing the lock file to record something invalid. We'll + // distinguish those cases here in order to avoid the more + // specific error message potentially being a red herring in + // the edge-cases. 
+ currentConstraints := getproviders.VersionConstraintsString(constraints) + lockedConstraints := getproviders.VersionConstraintsString(lock.VersionConstraints()) + switch { + case currentConstraints != lockedConstraints: + errs = append(errs, fmt.Errorf("provider %s: locked version selection %s doesn't match the updated version constraints %q", providerAddr, selectedVersion.String(), currentConstraints)) + default: + errs = append(errs, fmt.Errorf("provider %s: version constraints %q don't match the locked version selection %s", providerAddr, currentConstraints, selectedVersion.String())) + } + } + } + + // Return multiple errors in an arbitrary-but-deterministic order. + sort.Slice(errs, func(i, j int) bool { + return errs[i].Error() < errs[j].Error() + }) + + return errs +} + // ProviderRequirements searches the full tree of modules under the receiver // for both explicit and implicit dependencies on providers. // diff --git a/internal/terraform/configs/config_build.go b/internal/terraform/configs/config_build.go index dc07d58d..4534f63e 100644 --- a/internal/terraform/configs/config_build.go +++ b/internal/terraform/configs/config_build.go @@ -23,11 +23,15 @@ func BuildConfig(root *Module, walker ModuleWalker) (*Config, hcl.Diagnostics) { cfg.Root = cfg // Root module is self-referential. cfg.Children, diags = buildChildModules(cfg, walker) - // Now that the config is built, we can connect the provider names to all - // the known types for validation. - cfg.resolveProviderTypes() + // Skip provider resolution if there are any errors, since the provider + // configurations themselves may not be valid. + if !diags.HasErrors() { + // Now that the config is built, we can connect the provider names to all + // the known types for validation. + cfg.resolveProviderTypes() + } - diags = append(diags, validateProviderConfigs(nil, cfg, false)...) + diags = append(diags, validateProviderConfigs(nil, cfg, nil)...) 
return cfg, diags } @@ -145,7 +149,7 @@ type ModuleRequest struct { // SourceAddr is the source address string provided by the user in // configuration. - SourceAddr string + SourceAddr addrs.ModuleSource // SourceAddrRange is the source range for the SourceAddr value as it // was provided in configuration. This can and should be used to generate diff --git a/internal/terraform/configs/config_build_test.go b/internal/terraform/configs/config_build_test.go index a977e432..274a5cd0 100644 --- a/internal/terraform/configs/config_build_test.go +++ b/internal/terraform/configs/config_build_test.go @@ -30,7 +30,7 @@ func TestBuildConfig(t *testing.T) { // SourceAddr as a path relative to our fixture directory. // A "real" implementation of ModuleWalker should accept the // various different source address syntaxes Terraform supports. - sourcePath := filepath.Join("testdata/config-build", req.SourceAddr) + sourcePath := filepath.Join("testdata/config-build", req.SourceAddr.String()) mod, diags := parser.LoadConfigDir(sourcePath) version, _ := version.NewVersion(fmt.Sprintf("1.0.%d", versionI)) @@ -86,7 +86,7 @@ func TestBuildConfigDiags(t *testing.T) { // SourceAddr as a path relative to our fixture directory. // A "real" implementation of ModuleWalker should accept the // various different source address syntaxes Terraform supports. - sourcePath := filepath.Join("testdata/nested-errors", req.SourceAddr) + sourcePath := filepath.Join("testdata/nested-errors", req.SourceAddr.String()) mod, diags := parser.LoadConfigDir(sourcePath) version, _ := version.NewVersion(fmt.Sprintf("1.0.%d", versionI)) @@ -130,7 +130,7 @@ func TestBuildConfigChildModuleBackend(t *testing.T) { // SourceAddr as a path relative to our fixture directory. // A "real" implementation of ModuleWalker should accept the // various different source address syntaxes Terraform supports. 
- sourcePath := filepath.Join("testdata/nested-backend-warning", req.SourceAddr) + sourcePath := filepath.Join("testdata/nested-backend-warning", req.SourceAddr.String()) mod, diags := parser.LoadConfigDir(sourcePath) version, _ := version.NewVersion("1.0.0") @@ -206,7 +206,7 @@ func TestBuildConfigInvalidModules(t *testing.T) { func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) { // for simplicity, these tests will treat all source // addresses as relative to the root module - sourcePath := filepath.Join(path, req.SourceAddr) + sourcePath := filepath.Join(path, req.SourceAddr.String()) mod, diags := parser.LoadConfigDir(sourcePath) version, _ := version.NewVersion("1.0.0") return mod, version, diags @@ -224,7 +224,7 @@ func TestBuildConfigInvalidModules(t *testing.T) { } if !found { - t.Errorf("Expected error diagnostic containing %q", msg) + t.Errorf("Expected error diagnostic containing:\n %s", msg) } } @@ -241,7 +241,7 @@ func TestBuildConfigInvalidModules(t *testing.T) { } if !found { - t.Errorf("Unexpected error: %q", diag) + t.Errorf("Unexpected error:\n %s", diag) } } @@ -255,7 +255,7 @@ func TestBuildConfigInvalidModules(t *testing.T) { } if !found { - t.Errorf("Expected warning diagnostic containing %q", msg) + t.Errorf("Expected warning diagnostic containing:\n %s", msg) } } @@ -272,7 +272,7 @@ func TestBuildConfigInvalidModules(t *testing.T) { } if !found { - t.Errorf("Unexpected warning: %q", diag) + t.Errorf("Unexpected warning:\n %s", diag) } } diff --git a/internal/terraform/configs/config_test.go b/internal/terraform/configs/config_test.go index f8c0af06..4d6a9f33 100644 --- a/internal/terraform/configs/config_test.go +++ b/internal/terraform/configs/config_test.go @@ -9,6 +9,7 @@ import ( "github.com/zclconf/go-cty/cty" "github.com/camptocamp/terraboard/internal/terraform/addrs" + "github.com/camptocamp/terraboard/internal/terraform/depsfile" "github.com/camptocamp/terraboard/internal/terraform/getproviders" version 
"github.com/hashicorp/go-version" "github.com/hashicorp/hcl/v2/hclsyntax" @@ -55,8 +56,8 @@ func TestConfigProviderTypes_nested(t *testing.T) { got = cfg.ProviderTypes() want := []addrs.Provider{ - addrs.NewProvider(addrs.DefaultRegistryHost, "bar", "test"), - addrs.NewProvider(addrs.DefaultRegistryHost, "foo", "test"), + addrs.NewProvider(addrs.DefaultProviderRegistryHost, "bar", "test"), + addrs.NewProvider(addrs.DefaultProviderRegistryHost, "foo", "test"), addrs.NewDefaultProvider("test"), } @@ -105,7 +106,7 @@ func TestConfigResolveAbsProviderAddr(t *testing.T) { got := cfg.ResolveAbsProviderAddr(addr, addrs.RootModule) want := addrs.AbsProviderConfig{ Module: addrs.RootModule, - Provider: addrs.NewProvider(addrs.DefaultRegistryHost, "foo", "test"), + Provider: addrs.NewProvider(addrs.DefaultProviderRegistryHost, "foo", "test"), Alias: "boop", } if got, want := got.String(), want.String(); got != want { @@ -124,7 +125,7 @@ func TestConfigProviderRequirements(t *testing.T) { assertDiagnosticSummary(t, diags, "Version constraints inside provider configuration blocks are deprecated") tlsProvider := addrs.NewProvider( - addrs.DefaultRegistryHost, + addrs.DefaultProviderRegistryHost, "hashicorp", "tls", ) happycloudProvider := addrs.NewProvider( @@ -167,7 +168,7 @@ func TestConfigProviderRequirementsShallow(t *testing.T) { assertDiagnosticSummary(t, diags, "Version constraints inside provider configuration blocks are deprecated") tlsProvider := addrs.NewProvider( - addrs.DefaultRegistryHost, + addrs.DefaultProviderRegistryHost, "hashicorp", "tls", ) nullProvider := addrs.NewDefaultProvider("null") @@ -203,7 +204,7 @@ func TestConfigProviderRequirementsByModule(t *testing.T) { assertDiagnosticSummary(t, diags, "Version constraints inside provider configuration blocks are deprecated") tlsProvider := addrs.NewProvider( - addrs.DefaultRegistryHost, + addrs.DefaultProviderRegistryHost, "hashicorp", "tls", ) happycloudProvider := addrs.NewProvider( @@ -221,7 +222,7 @@ 
func TestConfigProviderRequirementsByModule(t *testing.T) { assertNoDiagnostics(t, diags) want := &ModuleRequirements{ Name: "", - SourceAddr: "", + SourceAddr: nil, SourceDir: "testdata/provider-reqs", Requirements: getproviders.Requirements{ // Only the root module's version is present here @@ -235,7 +236,7 @@ func TestConfigProviderRequirementsByModule(t *testing.T) { Children: map[string]*ModuleRequirements{ "kinder": { Name: "kinder", - SourceAddr: "./child", + SourceAddr: addrs.ModuleSourceLocal("./child"), SourceDir: "testdata/provider-reqs/child", Requirements: getproviders.Requirements{ nullProvider: getproviders.MustParseVersionConstraints("= 2.0.1"), @@ -244,7 +245,7 @@ func TestConfigProviderRequirementsByModule(t *testing.T) { Children: map[string]*ModuleRequirements{ "nested": { Name: "nested", - SourceAddr: "./grandchild", + SourceAddr: addrs.ModuleSourceLocal("./grandchild"), SourceDir: "testdata/provider-reqs/child/grandchild", Requirements: getproviders.Requirements{ grandchildProvider: nil, @@ -262,12 +263,134 @@ func TestConfigProviderRequirementsByModule(t *testing.T) { } } +func TestVerifyDependencySelections(t *testing.T) { + cfg, diags := testNestedModuleConfigFromDir(t, "testdata/provider-reqs") + // TODO: Version Constraint Deprecation. + // Once we've removed the version argument from provider configuration + // blocks, this can go back to expected 0 diagnostics. 
+ // assertNoDiagnostics(t, diags) + assertDiagnosticCount(t, diags, 1) + assertDiagnosticSummary(t, diags, "Version constraints inside provider configuration blocks are deprecated") + + tlsProvider := addrs.NewProvider( + addrs.DefaultProviderRegistryHost, + "hashicorp", "tls", + ) + happycloudProvider := addrs.NewProvider( + svchost.Hostname("tf.example.com"), + "awesomecorp", "happycloud", + ) + nullProvider := addrs.NewDefaultProvider("null") + randomProvider := addrs.NewDefaultProvider("random") + impliedProvider := addrs.NewDefaultProvider("implied") + configuredProvider := addrs.NewDefaultProvider("configured") + grandchildProvider := addrs.NewDefaultProvider("grandchild") + + tests := map[string]struct { + PrepareLocks func(*depsfile.Locks) + WantErrs []string + }{ + "empty locks": { + func(*depsfile.Locks) { + // Intentionally blank + }, + []string{ + `provider registry.terraform.io/hashicorp/configured: required by this configuration but no version is selected`, + `provider registry.terraform.io/hashicorp/grandchild: required by this configuration but no version is selected`, + `provider registry.terraform.io/hashicorp/implied: required by this configuration but no version is selected`, + `provider registry.terraform.io/hashicorp/null: required by this configuration but no version is selected`, + `provider registry.terraform.io/hashicorp/random: required by this configuration but no version is selected`, + `provider registry.terraform.io/hashicorp/tls: required by this configuration but no version is selected`, + `provider tf.example.com/awesomecorp/happycloud: required by this configuration but no version is selected`, + }, + }, + "suitable locks": { + func(locks *depsfile.Locks) { + locks.SetProvider(configuredProvider, getproviders.MustParseVersion("1.4.0"), nil, nil) + locks.SetProvider(grandchildProvider, getproviders.MustParseVersion("0.1.0"), nil, nil) + locks.SetProvider(impliedProvider, getproviders.MustParseVersion("0.2.0"), nil, nil) + 
locks.SetProvider(nullProvider, getproviders.MustParseVersion("2.0.1"), nil, nil) + locks.SetProvider(randomProvider, getproviders.MustParseVersion("1.2.2"), nil, nil) + locks.SetProvider(tlsProvider, getproviders.MustParseVersion("3.0.1"), nil, nil) + locks.SetProvider(happycloudProvider, getproviders.MustParseVersion("0.0.1"), nil, nil) + }, + nil, + }, + "null provider constraints changed": { + func(locks *depsfile.Locks) { + locks.SetProvider(configuredProvider, getproviders.MustParseVersion("1.4.0"), nil, nil) + locks.SetProvider(grandchildProvider, getproviders.MustParseVersion("0.1.0"), nil, nil) + locks.SetProvider(impliedProvider, getproviders.MustParseVersion("0.2.0"), nil, nil) + locks.SetProvider(nullProvider, getproviders.MustParseVersion("3.0.0"), nil, nil) + locks.SetProvider(randomProvider, getproviders.MustParseVersion("1.2.2"), nil, nil) + locks.SetProvider(tlsProvider, getproviders.MustParseVersion("3.0.1"), nil, nil) + locks.SetProvider(happycloudProvider, getproviders.MustParseVersion("0.0.1"), nil, nil) + }, + []string{ + `provider registry.terraform.io/hashicorp/null: locked version selection 3.0.0 doesn't match the updated version constraints "~> 2.0.0, 2.0.1"`, + }, + }, + "null provider lock changed": { + func(locks *depsfile.Locks) { + // In this case, we set the lock file version constraints to + // match the configuration, and so our error message changes + // to not assume the configuration changed anymore. 
+ locks.SetProvider(nullProvider, getproviders.MustParseVersion("3.0.0"), getproviders.MustParseVersionConstraints("~> 2.0.0, 2.0.1"), nil) + + locks.SetProvider(configuredProvider, getproviders.MustParseVersion("1.4.0"), nil, nil) + locks.SetProvider(grandchildProvider, getproviders.MustParseVersion("0.1.0"), nil, nil) + locks.SetProvider(impliedProvider, getproviders.MustParseVersion("0.2.0"), nil, nil) + locks.SetProvider(randomProvider, getproviders.MustParseVersion("1.2.2"), nil, nil) + locks.SetProvider(tlsProvider, getproviders.MustParseVersion("3.0.1"), nil, nil) + locks.SetProvider(happycloudProvider, getproviders.MustParseVersion("0.0.1"), nil, nil) + }, + []string{ + `provider registry.terraform.io/hashicorp/null: version constraints "~> 2.0.0, 2.0.1" don't match the locked version selection 3.0.0`, + }, + }, + "overridden provider": { + func(locks *depsfile.Locks) { + locks.SetProviderOverridden(happycloudProvider) + }, + []string{ + // We still catch all of the other ones, because only happycloud was overridden + `provider registry.terraform.io/hashicorp/configured: required by this configuration but no version is selected`, + `provider registry.terraform.io/hashicorp/grandchild: required by this configuration but no version is selected`, + `provider registry.terraform.io/hashicorp/implied: required by this configuration but no version is selected`, + `provider registry.terraform.io/hashicorp/null: required by this configuration but no version is selected`, + `provider registry.terraform.io/hashicorp/random: required by this configuration but no version is selected`, + `provider registry.terraform.io/hashicorp/tls: required by this configuration but no version is selected`, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + depLocks := depsfile.NewLocks() + test.PrepareLocks(depLocks) + gotErrs := cfg.VerifyDependencySelections(depLocks) + + var gotErrsStr []string + if gotErrs != nil { + gotErrsStr = make([]string, 
len(gotErrs)) + for i, err := range gotErrs { + gotErrsStr[i] = err.Error() + } + } + + if diff := cmp.Diff(test.WantErrs, gotErrsStr); diff != "" { + t.Errorf("wrong errors\n%s", diff) + } + }) + } +} + func TestConfigProviderForConfigAddr(t *testing.T) { cfg, diags := testModuleConfigFromDir("testdata/valid-modules/providers-fqns") assertNoDiagnostics(t, diags) got := cfg.ProviderForConfigAddr(addrs.NewDefaultLocalProviderConfig("foo-test")) - want := addrs.NewProvider(addrs.DefaultRegistryHost, "foo", "test") + want := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "foo", "test") if !got.Equals(want) { t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) } diff --git a/internal/terraform/configs/configload/copy_dir_test.go b/internal/terraform/configs/configload/copy_dir_test.go index 2f9f86b8..b7cc8049 100644 --- a/internal/terraform/configs/configload/copy_dir_test.go +++ b/internal/terraform/configs/configload/copy_dir_test.go @@ -21,14 +21,10 @@ import ( // └── main.tf func TestCopyDir_symlinks(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "copy-dir-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() moduleDir := filepath.Join(tmpdir, "modules") - err = os.Mkdir(moduleDir, os.ModePerm) + err := os.Mkdir(moduleDir, os.ModePerm) if err != nil { t.Fatal(err) } @@ -67,14 +63,10 @@ func TestCopyDir_symlinks(t *testing.T) { } func TestCopyDir_symlink_file(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "copy-file-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() moduleDir := filepath.Join(tmpdir, "modules") - err = os.Mkdir(moduleDir, os.ModePerm) + err := os.Mkdir(moduleDir, os.ModePerm) if err != nil { t.Fatal(err) } diff --git a/internal/terraform/configs/configload/inode.go b/internal/terraform/configs/configload/inode.go index 57df0414..cbd93204 100644 --- a/internal/terraform/configs/configload/inode.go +++ 
b/internal/terraform/configs/configload/inode.go @@ -1,3 +1,4 @@ +//go:build linux || darwin || openbsd || netbsd || solaris || dragonfly // +build linux darwin openbsd netbsd solaris dragonfly package configload diff --git a/internal/terraform/configs/configload/inode_freebsd.go b/internal/terraform/configs/configload/inode_freebsd.go index 4dc28eaa..cefd72ca 100644 --- a/internal/terraform/configs/configload/inode_freebsd.go +++ b/internal/terraform/configs/configload/inode_freebsd.go @@ -1,3 +1,4 @@ +//go:build freebsd // +build freebsd package configload diff --git a/internal/terraform/configs/configload/inode_windows.go b/internal/terraform/configs/configload/inode_windows.go index 0d22e672..be26679a 100644 --- a/internal/terraform/configs/configload/inode_windows.go +++ b/internal/terraform/configs/configload/inode_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package configload diff --git a/internal/terraform/configs/configload/loader_load.go b/internal/terraform/configs/configload/loader_load.go index 1a07fa1b..1f8f43c0 100644 --- a/internal/terraform/configs/configload/loader_load.go +++ b/internal/terraform/configs/configload/loader_load.go @@ -21,7 +21,13 @@ import ( func (l *Loader) LoadConfig(rootDir string) (*configs.Config, hcl.Diagnostics) { rootMod, diags := l.parser.LoadConfigDir(rootDir) if rootMod == nil || diags.HasErrors() { - return nil, diags + // Ensure we return any parsed modules here so that required_version + // constraints can be verified even when encountering errors. + cfg := &configs.Config{ + Module: rootMod, + } + + return cfg, diags } cfg, cDiags := configs.BuildConfig(rootMod, configs.ModuleWalkerFunc(l.moduleWalkerLoad)) @@ -54,8 +60,11 @@ func (l *Loader) moduleWalkerLoad(req *configs.ModuleRequest) (*configs.Module, var diags hcl.Diagnostics - // Check for inconsistencies between manifest and config - if req.SourceAddr != record.SourceAddr { + // Check for inconsistencies between manifest and config. 
+ + // We ignore a nil SourceAddr here, which represents a failure during + // configuration parsing, and will be reported in a diagnostic elsewhere. + if req.SourceAddr != nil && req.SourceAddr.String() != record.SourceAddr { diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Module source has changed", diff --git a/internal/terraform/configs/configload/loader_load_test.go b/internal/terraform/configs/configload/loader_load_test.go index 907cf295..aa5d1187 100644 --- a/internal/terraform/configs/configload/loader_load_test.go +++ b/internal/terraform/configs/configload/loader_load_test.go @@ -91,8 +91,90 @@ func TestLoaderLoadConfig_loadDiags(t *testing.T) { t.Fatalf("unexpected error from NewLoader: %s", err) } - _, diags := loader.LoadConfig(fixtureDir) + cfg, diags := loader.LoadConfig(fixtureDir) if !diags.HasErrors() { - t.Fatalf("success; want error") + t.Fatal("success; want error") + } + + if cfg == nil { + t.Fatal("partial config not returned with diagnostics") + } + + if cfg.Module == nil { + t.Fatal("expected config module") } } + +func TestLoaderLoadConfig_childProviderGrandchildCount(t *testing.T) { + // This test is focused on the specific situation where: + // - A child module contains a nested provider block, which is no longer + // recommended but supported for backward-compatibility. + // - A child of that child does _not_ contain a nested provider block, + // and is called with "count" (would also apply to "for_each" and + // "depends_on"). + // It isn't valid to use "count" with a module that _itself_ contains + // a provider configuration, but it _is_ valid for a module with a + // provider configuration to call another module with count. 
We previously + // botched this rule and so this is a regression test to cover the + // solution to that mistake: + // https://github.com/hashicorp/terraform/issues/31081 + + // Since this test is based on success rather than failure and it's + // covering a relatively large set of code where only a small part + // contributes to the test, we'll make sure to test both the success and + // failure cases here so that we'll have a better chance of noticing if a + // future change makes this succeed only because we've reorganized the code + // so that the check isn't happening at all anymore. + // + // If the "not okay" subtest fails, you should also be skeptical about + // whether the "okay" subtest is still valid, even if it happens to + // still be passing. + t.Run("okay", func(t *testing.T) { + fixtureDir := filepath.Clean("testdata/child-provider-grandchild-count") + loader, err := NewLoader(&Config{ + ModulesDir: filepath.Join(fixtureDir, ".terraform/modules"), + }) + if err != nil { + t.Fatalf("unexpected error from NewLoader: %s", err) + } + + cfg, diags := loader.LoadConfig(fixtureDir) + assertNoDiagnostics(t, diags) + if cfg == nil { + t.Fatalf("config is nil; want non-nil") + } + + var gotPaths []string + cfg.DeepEach(func(c *configs.Config) { + gotPaths = append(gotPaths, strings.Join(c.Path, ".")) + }) + sort.Strings(gotPaths) + wantPaths := []string{ + "", // root module + "child", + "child.grandchild", + } + + if !reflect.DeepEqual(gotPaths, wantPaths) { + t.Fatalf("wrong module paths\ngot: %swant %s", spew.Sdump(gotPaths), spew.Sdump(wantPaths)) + } + }) + t.Run("not okay", func(t *testing.T) { + fixtureDir := filepath.Clean("testdata/child-provider-child-count") + loader, err := NewLoader(&Config{ + ModulesDir: filepath.Join(fixtureDir, ".terraform/modules"), + }) + if err != nil { + t.Fatalf("unexpected error from NewLoader: %s", err) + } + + _, diags := loader.LoadConfig(fixtureDir) + if !diags.HasErrors() { + t.Fatalf("loading succeeded; want an 
error") + } + if got, want := diags.Error(), "Module is incompatible with count, for_each, and depends_on"; !strings.Contains(got, want) { + t.Errorf("missing expected error\nwant substring: %s\ngot: %s", want, got) + } + }) + +} diff --git a/internal/terraform/configs/configload/loader_snapshot.go b/internal/terraform/configs/configload/loader_snapshot.go index 6c47456f..24d0feb4 100644 --- a/internal/terraform/configs/configload/loader_snapshot.go +++ b/internal/terraform/configs/configload/loader_snapshot.go @@ -258,7 +258,7 @@ func (fs snapshotFS) Open(name string) (afero.File, error) { filenames = append(filenames, n) } sort.Strings(filenames) - return snapshotDir{ + return &snapshotDir{ filenames: filenames, }, nil } @@ -310,7 +310,7 @@ func (fs snapshotFS) Stat(name string) (os.FileInfo, error) { if err != nil { return nil, err } - _, isDir := f.(snapshotDir) + _, isDir := f.(*snapshotDir) return snapshotFileInfo{ name: filepath.Base(name), isDir: isDir, @@ -377,9 +377,9 @@ type snapshotDir struct { at int } -var _ afero.File = snapshotDir{} +var _ afero.File = (*snapshotDir)(nil) -func (f snapshotDir) Readdir(count int) ([]os.FileInfo, error) { +func (f *snapshotDir) Readdir(count int) ([]os.FileInfo, error) { names, err := f.Readdirnames(count) if err != nil { return nil, err @@ -394,7 +394,7 @@ func (f snapshotDir) Readdir(count int) ([]os.FileInfo, error) { return ret, nil } -func (f snapshotDir) Readdirnames(count int) ([]string, error) { +func (f *snapshotDir) Readdirnames(count int) ([]string, error) { var outLen int names := f.filenames[f.at:] if count > 0 { diff --git a/internal/terraform/configs/configload/loader_snapshot_test.go b/internal/terraform/configs/configload/loader_snapshot_test.go index 24e41baf..cf1b9b26 100644 --- a/internal/terraform/configs/configload/loader_snapshot_test.go +++ b/internal/terraform/configs/configload/loader_snapshot_test.go @@ -1,6 +1,7 @@ package configload import ( + "os" "path/filepath" "reflect" "testing" @@ 
-71,6 +72,26 @@ module "child_b" { } +func TestLoadConfigWithSnapshot_invalidSource(t *testing.T) { + fixtureDir := filepath.Clean("testdata/already-installed-now-invalid") + + old, _ := os.Getwd() + os.Chdir(fixtureDir) + defer os.Chdir(old) + + loader, err := NewLoader(&Config{ + ModulesDir: ".terraform/modules", + }) + if err != nil { + t.Fatalf("unexpected error from NewLoader: %s", err) + } + + _, _, diags := loader.LoadConfigWithSnapshot(".") + if !diags.HasErrors() { + t.Error("LoadConfigWithSnapshot succeeded; want errors") + } +} + func TestSnapshotRoundtrip(t *testing.T) { fixtureDir := filepath.Clean("testdata/already-installed") loader, err := NewLoader(&Config{ diff --git a/internal/terraform/configs/configload/loader_test.go b/internal/terraform/configs/configload/loader_test.go index 7b3483b4..396a449b 100644 --- a/internal/terraform/configs/configload/loader_test.go +++ b/internal/terraform/configs/configload/loader_test.go @@ -14,7 +14,7 @@ func assertNoDiagnostics(t *testing.T, diags hcl.Diagnostics) bool { func assertDiagnosticCount(t *testing.T, diags hcl.Diagnostics, want int) bool { t.Helper() - if len(diags) != 0 { + if len(diags) != want { t.Errorf("wrong number of diagnostics %d; want %d", len(diags), want) for _, diag := range diags { t.Logf("- %s", diag) diff --git a/internal/terraform/configs/configload/testdata/already-installed-now-invalid/.terraform/modules/modules.json b/internal/terraform/configs/configload/testdata/already-installed-now-invalid/.terraform/modules/modules.json new file mode 100644 index 00000000..32a4ace5 --- /dev/null +++ b/internal/terraform/configs/configload/testdata/already-installed-now-invalid/.terraform/modules/modules.json @@ -0,0 +1 @@ +{"Modules":[{"Key":"","Source":"","Dir":"."},{"Key":"foo","Source":"./foo","Dir":"foo"},{"Key":"foo.bar","Source":"./bar","Dir":"foo/bar"}]} \ No newline at end of file diff --git a/internal/terraform/configs/configload/testdata/already-installed-now-invalid/foo/bar/main.tf 
b/internal/terraform/configs/configload/testdata/already-installed-now-invalid/foo/bar/main.tf new file mode 100644 index 00000000..48b5e2e0 --- /dev/null +++ b/internal/terraform/configs/configload/testdata/already-installed-now-invalid/foo/bar/main.tf @@ -0,0 +1,3 @@ +output "hello" { + value = "Hello from foo/bar" +} diff --git a/internal/terraform/configs/configload/testdata/already-installed-now-invalid/foo/main.tf b/internal/terraform/configs/configload/testdata/already-installed-now-invalid/foo/main.tf new file mode 100644 index 00000000..9fba5723 --- /dev/null +++ b/internal/terraform/configs/configload/testdata/already-installed-now-invalid/foo/main.tf @@ -0,0 +1,3 @@ +module "bar" { + source = "${path.module}/bar" +} diff --git a/internal/terraform/configs/configload/testdata/already-installed-now-invalid/root.tf b/internal/terraform/configs/configload/testdata/already-installed-now-invalid/root.tf new file mode 100644 index 00000000..020494e8 --- /dev/null +++ b/internal/terraform/configs/configload/testdata/already-installed-now-invalid/root.tf @@ -0,0 +1,3 @@ +module "foo" { + source = "./foo" +} diff --git a/internal/terraform/configs/configload/testdata/child-provider-child-count/.terraform/modules/modules.json b/internal/terraform/configs/configload/testdata/child-provider-child-count/.terraform/modules/modules.json new file mode 100644 index 00000000..3a80b674 --- /dev/null +++ b/internal/terraform/configs/configload/testdata/child-provider-child-count/.terraform/modules/modules.json @@ -0,0 +1,19 @@ +{ + "Modules": [ + { + "Key": "", + "Source": "", + "Dir": "." 
+ }, + { + "Key": "child", + "Source": "./child", + "Dir": "testdata/child-provider-child-count/child" + }, + { + "Key": "child.grandchild", + "Source": "../grandchild", + "Dir": "testdata/child-provider-child-count/grandchild" + } + ] +} diff --git a/internal/terraform/configs/configload/testdata/child-provider-child-count/child-provider-child-count.tf b/internal/terraform/configs/configload/testdata/child-provider-child-count/child-provider-child-count.tf new file mode 100644 index 00000000..5b39941a --- /dev/null +++ b/internal/terraform/configs/configload/testdata/child-provider-child-count/child-provider-child-count.tf @@ -0,0 +1,4 @@ +module "child" { + source = "./child" + count = 1 +} diff --git a/internal/terraform/configs/configload/testdata/child-provider-child-count/child/child-provider-child-count-child.tf b/internal/terraform/configs/configload/testdata/child-provider-child-count/child/child-provider-child-count-child.tf new file mode 100644 index 00000000..524742c3 --- /dev/null +++ b/internal/terraform/configs/configload/testdata/child-provider-child-count/child/child-provider-child-count-child.tf @@ -0,0 +1,7 @@ +provider "boop" { + blah = true +} + +module "grandchild" { + source = "../grandchild" +} diff --git a/internal/terraform/configs/configload/testdata/child-provider-child-count/grandchild/child-provider-child-count-grandchild.tf b/internal/terraform/configs/configload/testdata/child-provider-child-count/grandchild/child-provider-child-count-grandchild.tf new file mode 100644 index 00000000..ccd9dcef --- /dev/null +++ b/internal/terraform/configs/configload/testdata/child-provider-child-count/grandchild/child-provider-child-count-grandchild.tf @@ -0,0 +1 @@ +# Intentionally blank diff --git a/internal/terraform/configs/configload/testdata/child-provider-grandchild-count/.terraform/modules/modules.json b/internal/terraform/configs/configload/testdata/child-provider-grandchild-count/.terraform/modules/modules.json new file mode 100644 index 
00000000..a9239e3a --- /dev/null +++ b/internal/terraform/configs/configload/testdata/child-provider-grandchild-count/.terraform/modules/modules.json @@ -0,0 +1,19 @@ +{ + "Modules": [ + { + "Key": "", + "Source": "", + "Dir": "." + }, + { + "Key": "child", + "Source": "./child", + "Dir": "testdata/child-provider-grandchild-count/child" + }, + { + "Key": "child.grandchild", + "Source": "../grandchild", + "Dir": "testdata/child-provider-grandchild-count/grandchild" + } + ] +} diff --git a/internal/terraform/configs/configload/testdata/child-provider-grandchild-count/child-provider-grandchild-count.tf b/internal/terraform/configs/configload/testdata/child-provider-grandchild-count/child-provider-grandchild-count.tf new file mode 100644 index 00000000..1f95749f --- /dev/null +++ b/internal/terraform/configs/configload/testdata/child-provider-grandchild-count/child-provider-grandchild-count.tf @@ -0,0 +1,3 @@ +module "child" { + source = "./child" +} diff --git a/internal/terraform/configs/configload/testdata/child-provider-grandchild-count/child/child-provider-grandchild-count-child.tf b/internal/terraform/configs/configload/testdata/child-provider-grandchild-count/child/child-provider-grandchild-count-child.tf new file mode 100644 index 00000000..8d3fe102 --- /dev/null +++ b/internal/terraform/configs/configload/testdata/child-provider-grandchild-count/child/child-provider-grandchild-count-child.tf @@ -0,0 +1,12 @@ +provider "boop" { + blah = true +} + +module "grandchild" { + source = "../grandchild" + + # grandchild's caller (this file) has a legacy nested provider block, but + # grandchild itself does not and so it's valid to use "count" here even + # though it wouldn't be valid to call "child" (this file) with "count". 
+ count = 2 +} diff --git a/internal/terraform/configs/configload/testdata/child-provider-grandchild-count/grandchild/child-provider-grandchild-count-grandchild.tf b/internal/terraform/configs/configload/testdata/child-provider-grandchild-count/grandchild/child-provider-grandchild-count-grandchild.tf new file mode 100644 index 00000000..ccd9dcef --- /dev/null +++ b/internal/terraform/configs/configload/testdata/child-provider-grandchild-count/grandchild/child-provider-grandchild-count-grandchild.tf @@ -0,0 +1 @@ +# Intentionally blank diff --git a/internal/terraform/configs/configschema/coerce_value.go b/internal/terraform/configs/configschema/coerce_value.go index 41a53374..66804c37 100644 --- a/internal/terraform/configs/configschema/coerce_value.go +++ b/internal/terraform/configs/configschema/coerce_value.go @@ -27,16 +27,19 @@ func (b *Block) CoerceValue(in cty.Value) (cty.Value, error) { } func (b *Block) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) { + convType := b.specType() + impliedType := convType.WithoutOptionalAttributesDeep() + switch { case in.IsNull(): - return cty.NullVal(b.ImpliedType()), nil + return cty.NullVal(impliedType), nil case !in.IsKnown(): - return cty.UnknownVal(b.ImpliedType()), nil + return cty.UnknownVal(impliedType), nil } ty := in.Type() if !ty.IsObjectType() { - return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("an object is required") + return cty.UnknownVal(impliedType), path.NewErrorf("an object is required") } for name := range ty.AttributeTypes() { @@ -46,29 +49,32 @@ func (b *Block) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) { if _, defined := b.BlockTypes[name]; defined { continue } - return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("unexpected attribute %q", name) + return cty.UnknownVal(impliedType), path.NewErrorf("unexpected attribute %q", name) } attrs := make(map[string]cty.Value) for name, attrS := range b.Attributes { + attrType := impliedType.AttributeType(name) + 
attrConvType := convType.AttributeType(name) + var val cty.Value switch { case ty.HasAttribute(name): val = in.GetAttr(name) case attrS.Computed || attrS.Optional: - val = cty.NullVal(attrS.Type) + val = cty.NullVal(attrType) default: - return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("attribute %q is required", name) + return cty.UnknownVal(impliedType), path.NewErrorf("attribute %q is required", name) } - val, err := attrS.coerceValue(val, append(path, cty.GetAttrStep{Name: name})) + val, err := convert.Convert(val, attrConvType) if err != nil { - return cty.UnknownVal(b.ImpliedType()), err + return cty.UnknownVal(impliedType), append(path, cty.GetAttrStep{Name: name}).NewError(err) } - attrs[name] = val } + for typeName, blockS := range b.BlockTypes { switch blockS.Nesting { @@ -79,7 +85,7 @@ func (b *Block) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) { val := in.GetAttr(typeName) attrs[typeName], err = blockS.coerceValue(val, append(path, cty.GetAttrStep{Name: typeName})) if err != nil { - return cty.UnknownVal(b.ImpliedType()), err + return cty.UnknownVal(impliedType), err } default: attrs[typeName] = blockS.EmptyValue() @@ -100,7 +106,7 @@ func (b *Block) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) { } if !coll.CanIterateElements() { - return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a list") + return cty.UnknownVal(impliedType), path.NewErrorf("must be a list") } l := coll.LengthInt() @@ -116,7 +122,7 @@ func (b *Block) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) { idx, val := it.Element() val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: idx})) if err != nil { - return cty.UnknownVal(b.ImpliedType()), err + return cty.UnknownVal(impliedType), err } elems = append(elems, val) } @@ -141,7 +147,7 @@ func (b *Block) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) { } if !coll.CanIterateElements() { - return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a 
set") + return cty.UnknownVal(impliedType), path.NewErrorf("must be a set") } l := coll.LengthInt() @@ -157,7 +163,7 @@ func (b *Block) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) { idx, val := it.Element() val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: idx})) if err != nil { - return cty.UnknownVal(b.ImpliedType()), err + return cty.UnknownVal(impliedType), err } elems = append(elems, val) } @@ -182,7 +188,7 @@ func (b *Block) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) { } if !coll.CanIterateElements() { - return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a map") + return cty.UnknownVal(impliedType), path.NewErrorf("must be a map") } l := coll.LengthInt() if l == 0 { @@ -196,11 +202,11 @@ func (b *Block) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) { var err error key, val := it.Element() if key.Type() != cty.String || key.IsNull() || !key.IsKnown() { - return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a map") + return cty.UnknownVal(impliedType), path.NewErrorf("must be a map") } val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: key})) if err != nil { - return cty.UnknownVal(b.ImpliedType()), err + return cty.UnknownVal(impliedType), err } elems[key.AsString()] = val } @@ -240,11 +246,3 @@ func (b *Block) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) { return cty.ObjectVal(attrs), nil } - -func (a *Attribute) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) { - val, err := convert.Convert(in, a.Type) - if err != nil { - return cty.UnknownVal(a.Type), path.NewError(err) - } - return val, nil -} diff --git a/internal/terraform/configs/configschema/coerce_value_test.go b/internal/terraform/configs/configschema/coerce_value_test.go index 5f187b9c..0bfd29d4 100644 --- a/internal/terraform/configs/configschema/coerce_value_test.go +++ b/internal/terraform/configs/configschema/coerce_value_test.go @@ -538,6 +538,67 @@ func 
TestCoerceValue(t *testing.T) { }), ``, }, + "nested types": { + // handle NestedTypes + &Block{ + Attributes: map[string]*Attribute{ + "foo": { + NestedType: &Object{ + Nesting: NestingList, + Attributes: map[string]*Attribute{ + "bar": { + Type: cty.String, + Required: true, + }, + "baz": { + Type: cty.Map(cty.String), + Optional: true, + }, + }, + }, + Optional: true, + }, + "fob": { + NestedType: &Object{ + Nesting: NestingSet, + Attributes: map[string]*Attribute{ + "bar": { + Type: cty.String, + Optional: true, + }, + }, + }, + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("beep"), + }), + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("boop"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("beep"), + "baz": cty.NullVal(cty.Map(cty.String)), + }), + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("boop"), + "baz": cty.NullVal(cty.Map(cty.String)), + }), + }), + "fob": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ + "bar": cty.String, + }))), + }), + ``, + }, } for name, test := range tests { diff --git a/internal/terraform/configs/configschema/decoder_spec.go b/internal/terraform/configs/configschema/decoder_spec.go index fc518c65..d2d6616d 100644 --- a/internal/terraform/configs/configschema/decoder_spec.go +++ b/internal/terraform/configs/configschema/decoder_spec.go @@ -92,10 +92,10 @@ func (b *Block) DecoderSpec() hcldec.Spec { for name, blockS := range b.BlockTypes { if _, exists := ret[name]; exists { - // This indicates an invalid schema, since it's not valid to - // define both an attribute and a block type of the same name. - // However, we don't raise this here since it's checked by - // InternalValidate. 
+ // This indicates an invalid schema, since it's not valid to define + // both an attribute and a block type of the same name. We assume + // that the provider has already used something like + // InternalValidate to validate their schema. continue } @@ -121,7 +121,7 @@ func (b *Block) DecoderSpec() hcldec.Spec { // implied type more complete, but if there are any // dynamically-typed attributes inside we must use a tuple // instead, at the expense of our type then not being predictable. - if blockS.Block.ImpliedType().HasDynamicTypes() { + if blockS.Block.specType().HasDynamicTypes() { ret[name] = &hcldec.BlockTupleSpec{ TypeName: name, Nested: childSpec, @@ -138,10 +138,12 @@ func (b *Block) DecoderSpec() hcldec.Spec { } case NestingSet: // We forbid dynamically-typed attributes inside NestingSet in - // InternalValidate, so we don't do anything special to handle - // that here. (There is no set analog to tuple and object types, - // because cty's set implementation depends on knowing the static - // type in order to properly compute its internal hashes.) + // InternalValidate, so we don't do anything special to handle that + // here. (There is no set analog to tuple and object types, because + // cty's set implementation depends on knowing the static type in + // order to properly compute its internal hashes.) We assume that + // the provider has already used something like InternalValidate to + // validate their schema. ret[name] = &hcldec.BlockSetSpec{ TypeName: name, Nested: childSpec, @@ -153,7 +155,7 @@ func (b *Block) DecoderSpec() hcldec.Spec { // implied type more complete, but if there are any // dynamically-typed attributes inside we must use a tuple // instead, at the expense of our type then not being predictable. 
- if blockS.Block.ImpliedType().HasDynamicTypes() { + if blockS.Block.specType().HasDynamicTypes() { ret[name] = &hcldec.BlockObjectSpec{ TypeName: name, Nested: childSpec, @@ -168,7 +170,8 @@ func (b *Block) DecoderSpec() hcldec.Spec { } default: // Invalid nesting type is just ignored. It's checked by - // InternalValidate. + // InternalValidate. We assume that the provider has already used + // something like InternalValidate to validate their schema. continue } } @@ -184,16 +187,13 @@ func (a *Attribute) decoderSpec(name string) hcldec.Spec { } if a.NestedType != nil { - // FIXME: a panic() is a bad UX. Fix this, probably by extending - // InternalValidate() to check Attribute schemas as well and calling it - // when we get the schema from the provider in Context(). if a.Type != cty.NilType { panic("Invalid attribute schema: NestedType and Type cannot both be set. This is a bug in the provider.") } - ty := a.NestedType.ImpliedType() + ty := a.NestedType.specType() ret.Type = ty - ret.Required = a.Required || a.NestedType.MinItems > 0 + ret.Required = a.Required return ret } @@ -207,9 +207,15 @@ func (a *Attribute) decoderSpec(name string) hcldec.Spec { // belong to their own cty.Object definitions. It is used in other functions // which themselves handle that recursion. func listOptionalAttrsFromObject(o *Object) []string { - var ret []string + ret := make([]string, 0) + + // This is unlikely to happen outside of tests. 
+ if o == nil { + return ret + } + for name, attr := range o.Attributes { - if attr.Optional == true { + if attr.Optional || attr.Computed { ret = append(ret, name) } } diff --git a/internal/terraform/configs/configschema/decoder_spec_test.go b/internal/terraform/configs/configschema/decoder_spec_test.go index a6571eaa..12fdee76 100644 --- a/internal/terraform/configs/configschema/decoder_spec_test.go +++ b/internal/terraform/configs/configschema/decoder_spec_test.go @@ -1,10 +1,12 @@ package configschema import ( + "sort" "testing" "github.com/apparentlymart/go-dump/dump" "github.com/davecgh/go-spew/spew" + "github.com/google/go-cmp/cmp" "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hcldec" @@ -885,3 +887,43 @@ func TestAttributeDecoderSpec_panic(t *testing.T) { attrS.decoderSpec("attr") t.Errorf("expected panic") } + +func TestListOptionalAttrsFromObject(t *testing.T) { + tests := []struct { + input *Object + want []string + }{ + { + nil, + []string{}, + }, + { + &Object{}, + []string{}, + }, + { + &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "optional": {Type: cty.String, Optional: true}, + "required": {Type: cty.Number, Required: true}, + "computed": {Type: cty.List(cty.Bool), Computed: true}, + "optional_computed": {Type: cty.Map(cty.Bool), Optional: true, Computed: true}, + }, + }, + []string{"optional", "computed", "optional_computed"}, + }, + } + + for _, test := range tests { + got := listOptionalAttrsFromObject(test.input) + + // order is irrelevant + sort.Strings(got) + sort.Strings(test.want) + + if diff := cmp.Diff(got, test.want); diff != "" { + t.Fatalf("wrong result: %s\n", diff) + } + } +} diff --git a/internal/terraform/configs/configschema/implied_type.go b/internal/terraform/configs/configschema/implied_type.go index 4d4a042c..0a1dc75f 100644 --- a/internal/terraform/configs/configschema/implied_type.go +++ b/internal/terraform/configs/configschema/implied_type.go @@ -8,11 +8,23 @@ import ( // 
ImpliedType returns the cty.Type that would result from decoding a // configuration block using the receiving block schema. // +// The type returned from Block.ImpliedType differs from the type returned by +// hcldec.ImpliedType in that there will be no objects with optional +// attributes, since this value is not to be used for the decoding of +// configuration. +// // ImpliedType always returns a result, even if the given schema is // inconsistent. Code that creates configschema.Block objects should be // tested using the InternalValidate method to detect any inconsistencies // that would cause this method to fall back on defaults and assumptions. func (b *Block) ImpliedType() cty.Type { + return b.specType().WithoutOptionalAttributesDeep() +} + +// specType returns the cty.Type used for decoding a configuration +// block using the receiving block schema. This is the type used internally by +// hcldec to decode configuration. +func (b *Block) specType() cty.Type { if b == nil { return cty.EmptyObject } @@ -32,6 +44,9 @@ func (b *Block) ContainsSensitive() bool { if attrS.Sensitive { return true } + if attrS.NestedType != nil && attrS.NestedType.ContainsSensitive() { + return true + } } for _, blockS := range b.BlockTypes { if blockS.ContainsSensitive() { @@ -41,14 +56,20 @@ func (b *Block) ContainsSensitive() bool { return false } -// ImpliedType returns the cty.Type that would result from decoding a NestedType -// Attribute using the receiving block schema. +// ImpliedType returns the cty.Type that would result from decoding a +// NestedType Attribute using the receiving block schema. // // ImpliedType always returns a result, even if the given schema is // inconsistent. Code that creates configschema.Object objects should be tested // using the InternalValidate method to detect any inconsistencies that would // cause this method to fall back on defaults and assumptions. 
func (o *Object) ImpliedType() cty.Type { + return o.specType().WithoutOptionalAttributesDeep() +} + +// specType returns the cty.Type used for decoding a NestedType Attribute using +// the receiving block schema. +func (o *Object) specType() cty.Type { if o == nil { return cty.EmptyObject } @@ -56,7 +77,7 @@ func (o *Object) ImpliedType() cty.Type { attrTys := make(map[string]cty.Type, len(o.Attributes)) for name, attrS := range o.Attributes { if attrS.NestedType != nil { - attrTys[name] = attrS.NestedType.ImpliedType() + attrTys[name] = attrS.NestedType.specType() } else { attrTys[name] = attrS.Type } @@ -79,7 +100,7 @@ func (o *Object) ImpliedType() cty.Type { case NestingSet: return cty.Set(ret) default: // Should never happen - panic("invalid Nesting") + return cty.EmptyObject } } @@ -90,8 +111,8 @@ func (o *Object) ContainsSensitive() bool { if attrS.Sensitive { return true } - if attrS.NestedType != nil { - return attrS.NestedType.ContainsSensitive() + if attrS.NestedType != nil && attrS.NestedType.ContainsSensitive() { + return true } } return false diff --git a/internal/terraform/configs/configschema/implied_type_test.go b/internal/terraform/configs/configschema/implied_type_test.go index 7cd0f730..6e3c0de7 100644 --- a/internal/terraform/configs/configschema/implied_type_test.go +++ b/internal/terraform/configs/configschema/implied_type_test.go @@ -37,6 +37,7 @@ func TestBlockImpliedType(t *testing.T) { "optional_computed": { Type: cty.Map(cty.Bool), Optional: true, + Computed: true, }, }, }, @@ -111,6 +112,36 @@ func TestBlockImpliedType(t *testing.T) { }), }), }, + "nested objects with optional attrs": { + &Block{ + Attributes: map[string]*Attribute{ + "map": { + Optional: true, + NestedType: &Object{ + Nesting: NestingMap, + Attributes: map[string]*Attribute{ + "optional": {Type: cty.String, Optional: true}, + "required": {Type: cty.Number, Required: true}, + "computed": {Type: cty.List(cty.Bool), Computed: true}, + "optional_computed": {Type: 
cty.Map(cty.Bool), Optional: true, Computed: true}, + }, + }, + }, + }, + }, + // The ImpliedType from the type-level block should not contain any + // optional attributes. + cty.Object(map[string]cty.Type{ + "map": cty.Map(cty.Object( + map[string]cty.Type{ + "optional": cty.String, + "required": cty.Number, + "computed": cty.List(cty.Bool), + "optional_computed": cty.Map(cty.Bool), + }, + )), + }), + }, } for name, test := range tests { @@ -123,6 +154,70 @@ func TestBlockImpliedType(t *testing.T) { } } +func TestBlockContainsSensitive(t *testing.T) { + tests := map[string]struct { + Schema *Block + Want bool + }{ + "object contains sensitive": { + &Block{ + Attributes: map[string]*Attribute{ + "sensitive": {Sensitive: true}, + }, + }, + true, + }, + "no sensitive attrs": { + &Block{ + Attributes: map[string]*Attribute{ + "insensitive": {}, + }, + }, + false, + }, + "nested object contains sensitive": { + &Block{ + Attributes: map[string]*Attribute{ + "nested": { + NestedType: &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "sensitive": {Sensitive: true}, + }, + }, + }, + }, + }, + true, + }, + "nested obj, no sensitive attrs": { + &Block{ + Attributes: map[string]*Attribute{ + "nested": { + NestedType: &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "public": {}, + }, + }, + }, + }, + }, + false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + got := test.Schema.ContainsSensitive() + if got != test.Want { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } + +} + func TestObjectImpliedType(t *testing.T) { tests := map[string]struct { Schema *Object @@ -132,36 +227,27 @@ func TestObjectImpliedType(t *testing.T) { nil, cty.EmptyObject, }, + "empty": { + &Object{}, + cty.EmptyObject, + }, "attributes": { &Object{ Nesting: NestingSingle, Attributes: map[string]*Attribute{ - "optional": { - Type: cty.String, - Optional: true, - }, - "required": { - Type: 
cty.Number, - Required: true, - }, - "computed": { - Type: cty.List(cty.Bool), - Computed: true, - }, - "optional_computed": { - Type: cty.Map(cty.Bool), - Optional: true, - }, + "optional": {Type: cty.String, Optional: true}, + "required": {Type: cty.Number, Required: true}, + "computed": {Type: cty.List(cty.Bool), Computed: true}, + "optional_computed": {Type: cty.Map(cty.Bool), Optional: true, Computed: true}, }, }, - cty.ObjectWithOptionalAttrs( + cty.Object( map[string]cty.Type{ "optional": cty.String, "required": cty.Number, "computed": cty.List(cty.Bool), "optional_computed": cty.Map(cty.Bool), }, - []string{"optional", "optional_computed"}, ), }, "nested attributes": { @@ -172,21 +258,42 @@ func TestObjectImpliedType(t *testing.T) { NestedType: &Object{ Nesting: NestingSingle, Attributes: map[string]*Attribute{ - "optional": { - Type: cty.String, - Optional: true, - }, - "required": { - Type: cty.Number, - Required: true, - }, - "computed": { - Type: cty.List(cty.Bool), - Computed: true, - }, - "optional_computed": { - Type: cty.Map(cty.Bool), - Optional: true, + "optional": {Type: cty.String, Optional: true}, + "required": {Type: cty.Number, Required: true}, + "computed": {Type: cty.List(cty.Bool), Computed: true}, + "optional_computed": {Type: cty.Map(cty.Bool), Optional: true, Computed: true}, + }, + }, + Optional: true, + }, + }, + }, + cty.Object(map[string]cty.Type{ + "nested_type": cty.Object(map[string]cty.Type{ + "optional": cty.String, + "required": cty.Number, + "computed": cty.List(cty.Bool), + "optional_computed": cty.Map(cty.Bool), + }), + }), + }, + "nested object-type attributes": { + &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "nested_type": { + NestedType: &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "optional": {Type: cty.String, Optional: true}, + "required": {Type: cty.Number, Required: true}, + "computed": {Type: cty.List(cty.Bool), Computed: true}, + "optional_computed": 
{Type: cty.Map(cty.Bool), Optional: true, Computed: true}, + "object": { + Type: cty.ObjectWithOptionalAttrs(map[string]cty.Type{ + "optional": cty.String, + "required": cty.Number, + }, []string{"optional"}), }, }, }, @@ -194,14 +301,15 @@ func TestObjectImpliedType(t *testing.T) { }, }, }, - cty.ObjectWithOptionalAttrs(map[string]cty.Type{ - "nested_type": cty.ObjectWithOptionalAttrs(map[string]cty.Type{ + cty.Object(map[string]cty.Type{ + "nested_type": cty.Object(map[string]cty.Type{ "optional": cty.String, "required": cty.Number, "computed": cty.List(cty.Bool), "optional_computed": cty.Map(cty.Bool), - }, []string{"optional", "optional_computed"}), - }, []string{"nested_type"}), + "object": cty.Object(map[string]cty.Type{"optional": cty.String, "required": cty.Number}), + }), + }), }, "NestingList": { &Object{ @@ -210,7 +318,7 @@ func TestObjectImpliedType(t *testing.T) { "foo": {Type: cty.String, Optional: true}, }, }, - cty.List(cty.ObjectWithOptionalAttrs(map[string]cty.Type{"foo": cty.String}, []string{"foo"})), + cty.List(cty.Object(map[string]cty.Type{"foo": cty.String})), }, "NestingMap": { &Object{ @@ -309,6 +417,37 @@ func TestObjectContainsSensitive(t *testing.T) { }, false, }, + "several nested objects, one contains sensitive": { + &Object{ + Attributes: map[string]*Attribute{ + "alpha": { + NestedType: &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "nonsensitive": {}, + }, + }, + }, + "beta": { + NestedType: &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "sensitive": {Sensitive: true}, + }, + }, + }, + "gamma": { + NestedType: &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "nonsensitive": {}, + }, + }, + }, + }, + }, + true, + }, } for name, test := range tests { @@ -321,3 +460,145 @@ func TestObjectContainsSensitive(t *testing.T) { } } + +// Nested attribute should return optional object attributes for decoding. 
+func TestObjectSpecType(t *testing.T) { + tests := map[string]struct { + Schema *Object + Want cty.Type + }{ + "attributes": { + &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "optional": {Type: cty.String, Optional: true}, + "required": {Type: cty.Number, Required: true}, + "computed": {Type: cty.List(cty.Bool), Computed: true}, + "optional_computed": {Type: cty.Map(cty.Bool), Optional: true, Computed: true}, + }, + }, + cty.ObjectWithOptionalAttrs( + map[string]cty.Type{ + "optional": cty.String, + "required": cty.Number, + "computed": cty.List(cty.Bool), + "optional_computed": cty.Map(cty.Bool), + }, + []string{"optional", "computed", "optional_computed"}, + ), + }, + "nested attributes": { + &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "nested_type": { + NestedType: &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "optional": {Type: cty.String, Optional: true}, + "required": {Type: cty.Number, Required: true}, + "computed": {Type: cty.List(cty.Bool), Computed: true}, + "optional_computed": {Type: cty.Map(cty.Bool), Optional: true, Computed: true}, + }, + }, + Optional: true, + }, + }, + }, + cty.ObjectWithOptionalAttrs(map[string]cty.Type{ + "nested_type": cty.ObjectWithOptionalAttrs(map[string]cty.Type{ + "optional": cty.String, + "required": cty.Number, + "computed": cty.List(cty.Bool), + "optional_computed": cty.Map(cty.Bool), + }, []string{"optional", "computed", "optional_computed"}), + }, []string{"nested_type"}), + }, + "nested object-type attributes": { + &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "nested_type": { + NestedType: &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "optional": {Type: cty.String, Optional: true}, + "required": {Type: cty.Number, Required: true}, + "computed": {Type: cty.List(cty.Bool), Computed: true}, + "optional_computed": {Type: cty.Map(cty.Bool), Optional: true, Computed: true}, + 
"object": { + Type: cty.ObjectWithOptionalAttrs(map[string]cty.Type{ + "optional": cty.String, + "required": cty.Number, + }, []string{"optional"}), + }, + }, + }, + Optional: true, + }, + }, + }, + cty.ObjectWithOptionalAttrs(map[string]cty.Type{ + "nested_type": cty.ObjectWithOptionalAttrs(map[string]cty.Type{ + "optional": cty.String, + "required": cty.Number, + "computed": cty.List(cty.Bool), + "optional_computed": cty.Map(cty.Bool), + "object": cty.ObjectWithOptionalAttrs(map[string]cty.Type{"optional": cty.String, "required": cty.Number}, []string{"optional"}), + }, []string{"optional", "computed", "optional_computed"}), + }, []string{"nested_type"}), + }, + "NestingList": { + &Object{ + Nesting: NestingList, + Attributes: map[string]*Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + cty.List(cty.ObjectWithOptionalAttrs(map[string]cty.Type{"foo": cty.String}, []string{"foo"})), + }, + "NestingMap": { + &Object{ + Nesting: NestingMap, + Attributes: map[string]*Attribute{ + "foo": {Type: cty.String}, + }, + }, + cty.Map(cty.Object(map[string]cty.Type{"foo": cty.String})), + }, + "NestingSet": { + &Object{ + Nesting: NestingSet, + Attributes: map[string]*Attribute{ + "foo": {Type: cty.String}, + }, + }, + cty.Set(cty.Object(map[string]cty.Type{"foo": cty.String})), + }, + "deeply nested NestingList": { + &Object{ + Nesting: NestingList, + Attributes: map[string]*Attribute{ + "foo": { + NestedType: &Object{ + Nesting: NestingList, + Attributes: map[string]*Attribute{ + "bar": {Type: cty.String}, + }, + }, + }, + }, + }, + cty.List(cty.Object(map[string]cty.Type{"foo": cty.List(cty.Object(map[string]cty.Type{"bar": cty.String}))})), + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + got := test.Schema.specType() + if !got.Equals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} diff --git a/internal/terraform/configs/configschema/internal_validate.go 
b/internal/terraform/configs/configschema/internal_validate.go index 9114e0ab..8876672d 100644 --- a/internal/terraform/configs/configschema/internal_validate.go +++ b/internal/terraform/configs/configschema/internal_validate.go @@ -11,74 +11,61 @@ import ( var validName = regexp.MustCompile(`^[a-z0-9_]+$`) -// InternalValidate returns an error if the receiving block and its child -// schema definitions have any consistencies with the documented rules for -// valid schema. +// InternalValidate returns an error if the receiving block and its child schema +// definitions have any inconsistencies with the documented rules for valid +// schema. // -// This is intended to be used within unit tests to detect when a given -// schema is invalid. +// This can be used within unit tests to detect when a given schema is invalid, +// and is run when terraform loads provider schemas during NewContext. func (b *Block) InternalValidate() error { if b == nil { return fmt.Errorf("top-level block schema is nil") } - return b.internalValidate("", nil) - + return b.internalValidate("") } -func (b *Block) internalValidate(prefix string, err error) error { +func (b *Block) internalValidate(prefix string) error { + var multiErr *multierror.Error + for name, attrS := range b.Attributes { if attrS == nil { - err = multierror.Append(err, fmt.Errorf("%s%s: attribute schema is nil", prefix, name)) + multiErr = multierror.Append(multiErr, fmt.Errorf("%s%s: attribute schema is nil", prefix, name)) continue } - if !validName.MatchString(name) { - err = multierror.Append(err, fmt.Errorf("%s%s: name may contain only lowercase letters, digits and underscores", prefix, name)) - } - if !attrS.Optional && !attrS.Required && !attrS.Computed { - err = multierror.Append(err, fmt.Errorf("%s%s: must set Optional, Required or Computed", prefix, name)) - } - if attrS.Optional && attrS.Required { - err = multierror.Append(err, fmt.Errorf("%s%s: cannot set both Optional and Required", prefix, name)) - } - if 
attrS.Computed && attrS.Required { - err = multierror.Append(err, fmt.Errorf("%s%s: cannot set both Computed and Required", prefix, name)) - } - if attrS.Type == cty.NilType { - err = multierror.Append(err, fmt.Errorf("%s%s: Type must be set to something other than cty.NilType", prefix, name)) - } + multiErr = multierror.Append(multiErr, attrS.internalValidate(name, prefix)) } for name, blockS := range b.BlockTypes { if blockS == nil { - err = multierror.Append(err, fmt.Errorf("%s%s: block schema is nil", prefix, name)) + multiErr = multierror.Append(multiErr, fmt.Errorf("%s%s: block schema is nil", prefix, name)) continue } if _, isAttr := b.Attributes[name]; isAttr { - err = multierror.Append(err, fmt.Errorf("%s%s: name defined as both attribute and child block type", prefix, name)) + multiErr = multierror.Append(multiErr, fmt.Errorf("%s%s: name defined as both attribute and child block type", prefix, name)) } else if !validName.MatchString(name) { - err = multierror.Append(err, fmt.Errorf("%s%s: name may contain only lowercase letters, digits and underscores", prefix, name)) + multiErr = multierror.Append(multiErr, fmt.Errorf("%s%s: name may contain only lowercase letters, digits and underscores", prefix, name)) } if blockS.MinItems < 0 || blockS.MaxItems < 0 { - err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must both be greater than zero", prefix, name)) + multiErr = multierror.Append(multiErr, fmt.Errorf("%s%s: MinItems and MaxItems must both be greater than zero", prefix, name)) } switch blockS.Nesting { case NestingSingle: switch { case blockS.MinItems != blockS.MaxItems: - err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must match in NestingSingle mode", prefix, name)) + multiErr = multierror.Append(multiErr, fmt.Errorf("%s%s: MinItems and MaxItems must match in NestingSingle mode", prefix, name)) case blockS.MinItems < 0 || blockS.MinItems > 1: - err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and 
MaxItems must be set to either 0 or 1 in NestingSingle mode", prefix, name)) + multiErr = multierror.Append(multiErr, fmt.Errorf("%s%s: MinItems and MaxItems must be set to either 0 or 1 in NestingSingle mode", prefix, name)) } case NestingGroup: if blockS.MinItems != 0 || blockS.MaxItems != 0 { - err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems cannot be used in NestingGroup mode", prefix, name)) + multiErr = multierror.Append(multiErr, fmt.Errorf("%s%s: MinItems and MaxItems cannot be used in NestingGroup mode", prefix, name)) } case NestingList, NestingSet: if blockS.MinItems > blockS.MaxItems && blockS.MaxItems != 0 { - err = multierror.Append(err, fmt.Errorf("%s%s: MinItems must be less than or equal to MaxItems in %s mode", prefix, name, blockS.Nesting)) + multiErr = multierror.Append(multiErr, fmt.Errorf("%s%s: MinItems must be less than or equal to MaxItems in %s mode", prefix, name, blockS.Nesting)) } if blockS.Nesting == NestingSet { ety := blockS.Block.ImpliedType() @@ -86,20 +73,87 @@ func (b *Block) internalValidate(prefix string, err error) error { // This is not permitted because the HCL (cty) set implementation // needs to know the exact type of set elements in order to // properly hash them, and so can't support mixed types. 
- err = multierror.Append(err, fmt.Errorf("%s%s: NestingSet blocks may not contain attributes of cty.DynamicPseudoType", prefix, name)) + multiErr = multierror.Append(multiErr, fmt.Errorf("%s%s: NestingSet blocks may not contain attributes of cty.DynamicPseudoType", prefix, name)) } } case NestingMap: if blockS.MinItems != 0 || blockS.MaxItems != 0 { - err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must both be 0 in NestingMap mode", prefix, name)) + multiErr = multierror.Append(multiErr, fmt.Errorf("%s%s: MinItems and MaxItems must both be 0 in NestingMap mode", prefix, name)) } default: - err = multierror.Append(err, fmt.Errorf("%s%s: invalid nesting mode %s", prefix, name, blockS.Nesting)) + multiErr = multierror.Append(multiErr, fmt.Errorf("%s%s: invalid nesting mode %s", prefix, name, blockS.Nesting)) } subPrefix := prefix + name + "." - err = blockS.Block.internalValidate(subPrefix, err) + multiErr = multierror.Append(multiErr, blockS.Block.internalValidate(subPrefix)) + } + + return multiErr.ErrorOrNil() +} + +// InternalValidate returns an error if the receiving attribute and its child +// schema definitions have any inconsistencies with the documented rules for +// valid schema. +func (a *Attribute) InternalValidate(name string) error { + if a == nil { + return fmt.Errorf("attribute schema is nil") + } + return a.internalValidate(name, "") +} + +func (a *Attribute) internalValidate(name, prefix string) error { + var err *multierror.Error + + /* FIXME: this validation breaks certain existing providers and cannot be enforced without coordination. 
+ if !validName.MatchString(name) { + err = multierror.Append(err, fmt.Errorf("%s%s: name may contain only lowercase letters, digits and underscores", prefix, name)) + } + */ + if !a.Optional && !a.Required && !a.Computed { + err = multierror.Append(err, fmt.Errorf("%s%s: must set Optional, Required or Computed", prefix, name)) + } + if a.Optional && a.Required { + err = multierror.Append(err, fmt.Errorf("%s%s: cannot set both Optional and Required", prefix, name)) + } + if a.Computed && a.Required { + err = multierror.Append(err, fmt.Errorf("%s%s: cannot set both Computed and Required", prefix, name)) + } + + if a.Type == cty.NilType && a.NestedType == nil { + err = multierror.Append(err, fmt.Errorf("%s%s: either Type or NestedType must be defined", prefix, name)) + } + + if a.Type != cty.NilType { + if a.NestedType != nil { + err = multierror.Append(err, fmt.Errorf("%s: Type and NestedType cannot both be set", name)) + } + } + + if a.NestedType != nil { + switch a.NestedType.Nesting { + case NestingSingle, NestingMap: + // no validations to perform + case NestingList, NestingSet: + if a.NestedType.Nesting == NestingSet { + ety := a.NestedType.ImpliedType() + if ety.HasDynamicTypes() { + // This is not permitted because the HCL (cty) set implementation + // needs to know the exact type of set elements in order to + // properly hash them, and so can't support mixed types.
+ err = multierror.Append(err, fmt.Errorf("%s%s: NestingSet blocks may not contain attributes of cty.DynamicPseudoType", prefix, name)) + } + } + default: + err = multierror.Append(err, fmt.Errorf("%s%s: invalid nesting mode %s", prefix, name, a.NestedType.Nesting)) + } + for name, attrS := range a.NestedType.Attributes { + if attrS == nil { + err = multierror.Append(err, fmt.Errorf("%s%s: attribute schema is nil", prefix, name)) + continue + } + err = multierror.Append(err, attrS.internalValidate(name, prefix)) + } } - return err + return err.ErrorOrNil() } diff --git a/internal/terraform/configs/configschema/internal_validate_test.go b/internal/terraform/configs/configschema/internal_validate_test.go index 512c7adb..3be461d4 100644 --- a/internal/terraform/configs/configschema/internal_validate_test.go +++ b/internal/terraform/configs/configschema/internal_validate_test.go @@ -10,172 +10,193 @@ import ( func TestBlockInternalValidate(t *testing.T) { tests := map[string]struct { - Block *Block - ErrCount int + Block *Block + Errs []string }{ "empty": { &Block{}, - 0, + []string{}, }, "valid": { &Block{ Attributes: map[string]*Attribute{ - "foo": &Attribute{ + "foo": { Type: cty.String, Required: true, }, - "bar": &Attribute{ + "bar": { Type: cty.String, Optional: true, }, - "baz": &Attribute{ + "baz": { Type: cty.String, Computed: true, }, - "baz_maybe": &Attribute{ + "baz_maybe": { Type: cty.String, Optional: true, Computed: true, }, }, BlockTypes: map[string]*NestedBlock{ - "single": &NestedBlock{ + "single": { Nesting: NestingSingle, Block: Block{}, }, - "single_required": &NestedBlock{ + "single_required": { Nesting: NestingSingle, Block: Block{}, MinItems: 1, MaxItems: 1, }, - "list": &NestedBlock{ + "list": { Nesting: NestingList, Block: Block{}, }, - "list_required": &NestedBlock{ + "list_required": { Nesting: NestingList, Block: Block{}, MinItems: 1, }, - "set": &NestedBlock{ + "set": { Nesting: NestingSet, Block: Block{}, }, - "set_required": 
&NestedBlock{ + "set_required": { Nesting: NestingSet, Block: Block{}, MinItems: 1, }, - "map": &NestedBlock{ + "map": { Nesting: NestingMap, Block: Block{}, }, }, }, - 0, + []string{}, }, "attribute with no flags set": { &Block{ Attributes: map[string]*Attribute{ - "foo": &Attribute{ + "foo": { Type: cty.String, }, }, }, - 1, // must set one of the flags + []string{"foo: must set Optional, Required or Computed"}, }, "attribute required and optional": { &Block{ Attributes: map[string]*Attribute{ - "foo": &Attribute{ + "foo": { Type: cty.String, Required: true, Optional: true, }, }, }, - 1, // both required and optional + []string{"foo: cannot set both Optional and Required"}, }, "attribute required and computed": { &Block{ Attributes: map[string]*Attribute{ - "foo": &Attribute{ + "foo": { Type: cty.String, Required: true, Computed: true, }, }, }, - 1, // both required and computed + []string{"foo: cannot set both Computed and Required"}, }, "attribute optional and computed": { &Block{ Attributes: map[string]*Attribute{ - "foo": &Attribute{ + "foo": { Type: cty.String, Optional: true, Computed: true, }, }, }, - 0, + []string{}, }, "attribute with missing type": { &Block{ Attributes: map[string]*Attribute{ - "foo": &Attribute{ + "foo": { Optional: true, }, }, }, - 1, // Type must be set + []string{"foo: either Type or NestedType must be defined"}, }, - "attribute with invalid name": { + /* FIXME: This caused errors when applied to existing providers (oci) + and cannot be enforced without coordination. 
+ + "attribute with invalid name": {&Block{Attributes: + map[string]*Attribute{"fooBar": {Type: cty.String, Optional: + true, + }, + }, + }, + []string{"fooBar: name may contain only lowercase letters, digits and underscores"}, + }, + */ + "attribute with invalid NestedType attribute": { &Block{ Attributes: map[string]*Attribute{ - "fooBar": &Attribute{ - Type: cty.String, + "foo": { + NestedType: &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "foo": { + Type: cty.String, + Required: true, + Optional: true, + }, + }, + }, Optional: true, }, }, }, - 1, // name may not contain uppercase letters + []string{"foo: cannot set both Optional and Required"}, }, "block type with invalid name": { &Block{ BlockTypes: map[string]*NestedBlock{ - "fooBar": &NestedBlock{ + "fooBar": { Nesting: NestingSingle, }, }, }, - 1, // name may not contain uppercase letters + []string{"fooBar: name may contain only lowercase letters, digits and underscores"}, }, "colliding names": { &Block{ Attributes: map[string]*Attribute{ - "foo": &Attribute{ + "foo": { Type: cty.String, Optional: true, }, }, BlockTypes: map[string]*NestedBlock{ - "foo": &NestedBlock{ + "foo": { Nesting: NestingSingle, }, }, }, - 1, // "foo" is defined as both attribute and block type + []string{"foo: name defined as both attribute and child block type"}, }, "nested block with badness": { &Block{ BlockTypes: map[string]*NestedBlock{ - "bad": &NestedBlock{ + "bad": { Nesting: NestingSingle, Block: Block{ Attributes: map[string]*Attribute{ - "nested_bad": &Attribute{ + "nested_bad": { Type: cty.String, Required: true, Optional: true, @@ -185,16 +206,16 @@ func TestBlockInternalValidate(t *testing.T) { }, }, }, - 1, // nested_bad is both required and optional + []string{"bad.nested_bad: cannot set both Optional and Required"}, }, "nested list block with dynamically-typed attribute": { &Block{ BlockTypes: map[string]*NestedBlock{ - "bad": &NestedBlock{ + "bad": { Nesting: NestingList, Block: Block{ 
Attributes: map[string]*Attribute{ - "nested_bad": &Attribute{ + "nested_bad": { Type: cty.DynamicPseudoType, Optional: true, }, @@ -203,16 +224,16 @@ func TestBlockInternalValidate(t *testing.T) { }, }, }, - 0, + []string{}, }, "nested set block with dynamically-typed attribute": { &Block{ BlockTypes: map[string]*NestedBlock{ - "bad": &NestedBlock{ + "bad": { Nesting: NestingSet, Block: Block{ Attributes: map[string]*Attribute{ - "nested_bad": &Attribute{ + "nested_bad": { Type: cty.DynamicPseudoType, Optional: true, }, @@ -221,11 +242,11 @@ func TestBlockInternalValidate(t *testing.T) { }, }, }, - 1, // NestingSet blocks may not contain attributes of cty.DynamicPseudoType + []string{"bad: NestingSet blocks may not contain attributes of cty.DynamicPseudoType"}, }, "nil": { nil, - 1, // block is nil + []string{"top-level block schema is nil"}, }, "nil attr": { &Block{ @@ -233,7 +254,7 @@ func TestBlockInternalValidate(t *testing.T) { "bad": nil, }, }, - 1, // attribute schema is nil + []string{"bad: attribute schema is nil"}, }, "nil block type": { &Block{ @@ -241,18 +262,26 @@ func TestBlockInternalValidate(t *testing.T) { "bad": nil, }, }, - 1, // block schema is nil + []string{"bad: block schema is nil"}, }, } for name, test := range tests { t.Run(name, func(t *testing.T) { errs := multierrorErrors(test.Block.InternalValidate()) - if got, want := len(errs), test.ErrCount; got != want { + if got, want := len(errs), len(test.Errs); got != want { t.Errorf("wrong number of errors %d; want %d", got, want) for _, err := range errs { t.Logf("- %s", err.Error()) } + } else { + if len(errs) > 0 { + for i := range errs { + if errs[i].Error() != test.Errs[i] { + t.Errorf("wrong error: got %s, want %s", errs[i].Error(), test.Errs[i]) + } + } + } } }) } diff --git a/internal/terraform/configs/configschema/marks.go b/internal/terraform/configs/configschema/marks.go index bf8f8156..855021ec 100644 --- a/internal/terraform/configs/configschema/marks.go +++ 
b/internal/terraform/configs/configschema/marks.go @@ -3,6 +3,7 @@ package configschema import ( "fmt" + "github.com/camptocamp/terraboard/internal/terraform/lang/marks" "github.com/zclconf/go-cty/cty" ) @@ -11,6 +12,8 @@ import ( // blocks are descended (if present in the given value). func (b *Block) ValueMarks(val cty.Value, path cty.Path) []cty.PathValueMarks { var pvm []cty.PathValueMarks + + // We can mark attributes as sensitive even if the value is null for name, attrS := range b.Attributes { if attrS.Sensitive { // Create a copy of the path, with this step added, to add to our PathValueMarks slice @@ -19,14 +22,33 @@ func (b *Block) ValueMarks(val cty.Value, path cty.Path) []cty.PathValueMarks { attrPath = append(path, cty.GetAttrStep{Name: name}) pvm = append(pvm, cty.PathValueMarks{ Path: attrPath, - Marks: cty.NewValueMarks("sensitive"), + Marks: cty.NewValueMarks(marks.Sensitive), }) } } + // If the value is null, no other marks are possible if val.IsNull() { return pvm } + + // Extract marks for nested attribute type values + for name, attrS := range b.Attributes { + // If the attribute has no nested type, or the nested type doesn't + // contain any sensitive attributes, skip inspecting it + if attrS.NestedType == nil || !attrS.NestedType.ContainsSensitive() { + continue + } + + // Create a copy of the path, with this step added, to add to our PathValueMarks slice + attrPath := make(cty.Path, len(path), len(path)+1) + copy(attrPath, path) + attrPath = append(path, cty.GetAttrStep{Name: name}) + + pvm = append(pvm, attrS.NestedType.ValueMarks(val.GetAttr(name), attrPath)...) 
+ } + + // Extract marks for nested blocks for name, blockS := range b.BlockTypes { // If our block doesn't contain any sensitive attributes, skip inspecting it if !blockS.Block.ContainsSensitive() { @@ -58,3 +80,72 @@ func (b *Block) ValueMarks(val cty.Value, path cty.Path) []cty.PathValueMarks { } return pvm } + +// ValueMarks returns a set of path value marks for a given value and path, +// based on the sensitive flag for each attribute within the nested attribute. +// Attributes with nested types are descended (if present in the given value). +func (o *Object) ValueMarks(val cty.Value, path cty.Path) []cty.PathValueMarks { + var pvm []cty.PathValueMarks + + if val.IsNull() || !val.IsKnown() { + return pvm + } + + for name, attrS := range o.Attributes { + // Skip attributes which can never produce sensitive path value marks + if !attrS.Sensitive && (attrS.NestedType == nil || !attrS.NestedType.ContainsSensitive()) { + continue + } + + switch o.Nesting { + case NestingSingle, NestingGroup: + // Create a path to this attribute + attrPath := make(cty.Path, len(path), len(path)+1) + copy(attrPath, path) + attrPath = append(path, cty.GetAttrStep{Name: name}) + + if attrS.Sensitive { + // If the entire attribute is sensitive, mark it so + pvm = append(pvm, cty.PathValueMarks{ + Path: attrPath, + Marks: cty.NewValueMarks(marks.Sensitive), + }) + } else { + // The attribute has a nested type which contains sensitive + // attributes, so recurse + pvm = append(pvm, attrS.NestedType.ValueMarks(val.GetAttr(name), attrPath)...) + } + case NestingList, NestingMap, NestingSet: + // For nested attribute types which have a non-single nesting mode, + // we add path value marks for each element of the collection + for it := val.ElementIterator(); it.Next(); { + idx, attrEV := it.Element() + attrV := attrEV.GetAttr(name) + + // Create a path to this element of the attribute's collection. 
Note + // that the path is extended in opposite order to the iteration order + // of the loops: index into the collection, then the contained + // attribute name. This is because we have one type + // representing multiple collection elements. + attrPath := make(cty.Path, len(path), len(path)+2) + copy(attrPath, path) + attrPath = append(path, cty.IndexStep{Key: idx}, cty.GetAttrStep{Name: name}) + + if attrS.Sensitive { + // If the entire attribute is sensitive, mark it so + pvm = append(pvm, cty.PathValueMarks{ + Path: attrPath, + Marks: cty.NewValueMarks(marks.Sensitive), + }) + } else { + // The attribute has a nested type which contains sensitive + // attributes, so recurse + pvm = append(pvm, attrS.NestedType.ValueMarks(attrV, attrPath)...) + } + } + default: + panic(fmt.Sprintf("unsupported nesting mode %s", attrS.NestedType.Nesting)) + } + } + return pvm +} diff --git a/internal/terraform/configs/configschema/marks_test.go b/internal/terraform/configs/configschema/marks_test.go index b5c672c3..cbe230cf 100644 --- a/internal/terraform/configs/configschema/marks_test.go +++ b/internal/terraform/configs/configschema/marks_test.go @@ -1,9 +1,9 @@ package configschema import ( - "fmt" "testing" + "github.com/camptocamp/terraboard/internal/terraform/lang/marks" "github.com/zclconf/go-cty/cty" ) @@ -18,6 +18,20 @@ func TestBlockValueMarks(t *testing.T) { Type: cty.String, Sensitive: true, }, + "nested": { + NestedType: &Object{ + Attributes: map[string]*Attribute{ + "boop": { + Type: cty.String, + }, + "honk": { + Type: cty.String, + Sensitive: true, + }, + }, + Nesting: NestingList, + }, + }, }, BlockTypes: map[string]*NestedBlock{ @@ -39,34 +53,46 @@ func TestBlockValueMarks(t *testing.T) { }, } - for _, tc := range []struct { + testCases := map[string]struct { given cty.Value expect cty.Value }{ - { + "unknown object": { cty.UnknownVal(schema.ImpliedType()), cty.UnknownVal(schema.ImpliedType()), }, - { + "null object": { cty.NullVal(schema.ImpliedType()), 
cty.NullVal(schema.ImpliedType()), }, - { + "object with unknown attributes and blocks": { cty.ObjectVal(map[string]cty.Value{ "sensitive": cty.UnknownVal(cty.String), "unsensitive": cty.UnknownVal(cty.String), - "list": cty.UnknownVal(schema.BlockTypes["list"].ImpliedType()), + "nested": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ + "boop": cty.String, + "honk": cty.String, + }))), + "list": cty.UnknownVal(schema.BlockTypes["list"].ImpliedType()), }), cty.ObjectVal(map[string]cty.Value{ - "sensitive": cty.UnknownVal(cty.String).Mark("sensitive"), + "sensitive": cty.UnknownVal(cty.String).Mark(marks.Sensitive), "unsensitive": cty.UnknownVal(cty.String), - "list": cty.UnknownVal(schema.BlockTypes["list"].ImpliedType()), + "nested": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ + "boop": cty.String, + "honk": cty.String, + }))), + "list": cty.UnknownVal(schema.BlockTypes["list"].ImpliedType()), }), }, - { + "object with block value": { cty.ObjectVal(map[string]cty.Value{ "sensitive": cty.NullVal(cty.String), "unsensitive": cty.UnknownVal(cty.String), + "nested": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ + "boop": cty.String, + "honk": cty.String, + }))), "list": cty.ListVal([]cty.Value{ cty.ObjectVal(map[string]cty.Value{ "sensitive": cty.UnknownVal(cty.String), @@ -79,22 +105,74 @@ func TestBlockValueMarks(t *testing.T) { }), }), cty.ObjectVal(map[string]cty.Value{ - "sensitive": cty.NullVal(cty.String).Mark("sensitive"), + "sensitive": cty.NullVal(cty.String).Mark(marks.Sensitive), "unsensitive": cty.UnknownVal(cty.String), + "nested": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ + "boop": cty.String, + "honk": cty.String, + }))), "list": cty.ListVal([]cty.Value{ cty.ObjectVal(map[string]cty.Value{ - "sensitive": cty.UnknownVal(cty.String).Mark("sensitive"), + "sensitive": cty.UnknownVal(cty.String).Mark(marks.Sensitive), "unsensitive": cty.UnknownVal(cty.String), }), cty.ObjectVal(map[string]cty.Value{ - "sensitive": 
cty.NullVal(cty.String).Mark("sensitive"), + "sensitive": cty.NullVal(cty.String).Mark(marks.Sensitive), "unsensitive": cty.NullVal(cty.String), }), }), }), }, - } { - t.Run(fmt.Sprintf("%#v", tc.given), func(t *testing.T) { + "object with known values and nested attribute": { + cty.ObjectVal(map[string]cty.Value{ + "sensitive": cty.StringVal("foo"), + "unsensitive": cty.StringVal("bar"), + "nested": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "boop": cty.StringVal("foo"), + "honk": cty.StringVal("bar"), + }), + cty.ObjectVal(map[string]cty.Value{ + "boop": cty.NullVal(cty.String), + "honk": cty.NullVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "boop": cty.UnknownVal(cty.String), + "honk": cty.UnknownVal(cty.String), + }), + }), + "list": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ + "sensitive": cty.String, + "unsensitive": cty.String, + }))), + }), + cty.ObjectVal(map[string]cty.Value{ + "sensitive": cty.StringVal("foo").Mark(marks.Sensitive), + "unsensitive": cty.StringVal("bar"), + "nested": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "boop": cty.StringVal("foo"), + "honk": cty.StringVal("bar").Mark(marks.Sensitive), + }), + cty.ObjectVal(map[string]cty.Value{ + "boop": cty.NullVal(cty.String), + "honk": cty.NullVal(cty.String).Mark(marks.Sensitive), + }), + cty.ObjectVal(map[string]cty.Value{ + "boop": cty.UnknownVal(cty.String), + "honk": cty.UnknownVal(cty.String).Mark(marks.Sensitive), + }), + }), + "list": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ + "sensitive": cty.String, + "unsensitive": cty.String, + }))), + }), + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { got := tc.given.MarkWithPaths(schema.ValueMarks(tc.given, nil)) if !got.RawEquals(tc.expect) { t.Fatalf("\nexpected: %#v\ngot: %#v\n", tc.expect, got) diff --git a/internal/terraform/configs/configschema/path.go b/internal/terraform/configs/configschema/path.go index 4c48c1a0..d3584d3a 
100644 --- a/internal/terraform/configs/configschema/path.go +++ b/internal/terraform/configs/configschema/path.go @@ -7,13 +7,19 @@ import ( // AttributeByPath looks up the Attribute schema which corresponds to the given // cty.Path. A nil value is returned if the given path does not correspond to a // specific attribute. -// TODO: this will need to be updated for nested attributes func (b *Block) AttributeByPath(path cty.Path) *Attribute { block := b - for _, step := range path { + for i, step := range path { switch step := step.(type) { case cty.GetAttrStep: if attr := block.Attributes[step.Name]; attr != nil { + // If the Attribute is defined with a NestedType and there's + // more to the path, descend into the NestedType + if attr.NestedType != nil && i < len(path)-1 { + return attr.NestedType.AttributeByPath(path[i+1:]) + } else if i < len(path)-1 { // There's more to the path, but not more to this Attribute. + return nil + } return attr } @@ -27,3 +33,23 @@ func (b *Block) AttributeByPath(path cty.Path) *Attribute { } return nil } + +// AttributeByPath recurses through a NestedType to look up the Attribute scheme +// which corresponds to the given cty.Path. A nil value is returned if the given +// path does not correspond to a specific attribute. +func (o *Object) AttributeByPath(path cty.Path) *Attribute { + for i, step := range path { + switch step := step.(type) { + case cty.GetAttrStep: + if attr := o.Attributes[step.Name]; attr != nil { + if attr.NestedType != nil && i < len(path)-1 { + return attr.NestedType.AttributeByPath(path[i+1:]) + } else if i < len(path)-1 { // There's more to the path, but not more to this Attribute. 
+ return nil + } + return attr + } + } + } + return nil +} diff --git a/internal/terraform/configs/configschema/path_test.go b/internal/terraform/configs/configschema/path_test.go index c4f673ba..1ca7cb41 100644 --- a/internal/terraform/configs/configschema/path_test.go +++ b/internal/terraform/configs/configschema/path_test.go @@ -11,6 +11,24 @@ func TestAttributeByPath(t *testing.T) { Attributes: map[string]*Attribute{ "a1": {Description: "a1"}, "a2": {Description: "a2"}, + "a3": { + Description: "a3", + NestedType: &Object{ + Nesting: NestingList, + Attributes: map[string]*Attribute{ + "nt1": {Description: "nt1"}, + "nt2": { + Description: "nt2", + NestedType: &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "deeply_nested": {Description: "deeply_nested"}, + }, + }, + }, + }, + }, + }, }, BlockTypes: map[string]*NestedBlock{ "b1": { @@ -66,6 +84,16 @@ func TestAttributeByPath(t *testing.T) { "a2", true, }, + { + cty.GetAttrPath("a3").IndexInt(1).GetAttr("nt2"), + "nt2", + true, + }, + { + cty.GetAttrPath("a3").IndexInt(1).GetAttr("b2").IndexString("foo").GetAttr("no"), + "missing", + false, + }, { cty.GetAttrPath("b1"), "block", @@ -119,3 +147,83 @@ func TestAttributeByPath(t *testing.T) { }) } } + +func TestObject_AttributeByPath(t *testing.T) { + obj := &Object{ + Nesting: NestingList, + Attributes: map[string]*Attribute{ + "a1": {Description: "a1"}, + "a2": { + Description: "a2", + NestedType: &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "n1": {Description: "n1"}, + "n2": { + Description: "n2", + NestedType: &Object{ + Attributes: map[string]*Attribute{ + "dn1": {Description: "dn1"}, + }, + }, + }, + }, + }, + }, + }, + } + + tests := []struct { + path cty.Path + attrDescription string + exists bool + }{ + { + cty.GetAttrPath("a2"), + "a2", + true, + }, + { + cty.GetAttrPath("a3"), + "missing", + false, + }, + { + cty.GetAttrPath("a2").IndexString("foo").GetAttr("n1"), + "n1", + true, + }, + { + 
cty.GetAttrPath("a2").IndexString("foo").GetAttr("n2").IndexInt(11).GetAttr("dn1"), + "dn1", + true, + }, + { + cty.GetAttrPath("a2").IndexString("foo").GetAttr("n2").IndexInt(11).GetAttr("dn1").IndexString("hello").GetAttr("nope"), + "missing_nested", + false, + }, + } + + for _, tc := range tests { + t.Run(tc.attrDescription, func(t *testing.T) { + attr := obj.AttributeByPath(tc.path) + if !tc.exists && attr == nil { + return + } + + if !tc.exists && attr != nil { + t.Fatalf("found Attribute, expected nil from path %#v\n", tc.path) + } + + if attr == nil { + t.Fatalf("missing attribute from path %#v\n", tc.path) + } + + if attr.Description != tc.attrDescription { + t.Fatalf("expected Attribute for %q, got %#v\n", tc.attrDescription, attr) + } + }) + } + +} diff --git a/internal/terraform/configs/configschema/schema.go b/internal/terraform/configs/configschema/schema.go index 581bead8..9ecc71e5 100644 --- a/internal/terraform/configs/configschema/schema.go +++ b/internal/terraform/configs/configschema/schema.go @@ -87,13 +87,6 @@ type Object struct { // many instances of the Object are allowed, how many labels it expects, and // how the resulting data will be converted into a data structure. Nesting NestingMode - - // MinItems and MaxItems set, for the NestingList and NestingSet nesting - // modes, lower and upper limits on the number of child blocks allowed - // of the given type. If both are left at zero, no limit is applied. - // These fields are ignored for other nesting modes and must both be left - // at zero. - MinItems, MaxItems int } // NestedBlock represents the embedding of one block within another. 
diff --git a/internal/terraform/configs/configschema/validate_traversal.go b/internal/terraform/configs/configschema/validate_traversal.go index 9f26c965..f211c1f7 100644 --- a/internal/terraform/configs/configschema/validate_traversal.go +++ b/internal/terraform/configs/configschema/validate_traversal.go @@ -17,7 +17,7 @@ import ( // diagnostics if any problems are found. // // This method is "optimistic" in that it will not return errors for possible -// problems that cannot be detected statically. It is possible that an +// problems that cannot be detected statically. It is possible that a // traversal which passed static validation will still fail when evaluated. func (b *Block) StaticValidateTraversal(traversal hcl.Traversal) tfdiags.Diagnostics { if !traversal.IsRelative() { diff --git a/internal/terraform/configs/configschema/validate_traversal_test.go b/internal/terraform/configs/configschema/validate_traversal_test.go index 242ab720..2000d2e7 100644 --- a/internal/terraform/configs/configschema/validate_traversal_test.go +++ b/internal/terraform/configs/configschema/validate_traversal_test.go @@ -58,7 +58,7 @@ func TestStaticValidateTraversal(t *testing.T) { }, { `obj.str.nonexist`, - `Unsupported attribute: This value does not have any attributes.`, + `Unsupported attribute: Can't access attributes on a primitive-typed value (string).`, }, { `obj.list`, diff --git a/internal/terraform/configs/experiments.go b/internal/terraform/configs/experiments.go index f459de22..e15ce162 100644 --- a/internal/terraform/configs/experiments.go +++ b/internal/terraform/configs/experiments.go @@ -9,6 +9,16 @@ import ( "github.com/zclconf/go-cty/cty" ) +// When developing UI for experimental features, you can temporarily disable +// the experiment warning by setting this package-level variable to a non-empty +// value using a link-time flag: +// +// go install -ldflags="-X 'github.com/hashicorp/terraform/internal/configs.disableExperimentWarnings=yes'" +// +// This 
functionality is for development purposes only and is not a feature we +// are committing to supporting for end users. +var disableExperimentWarnings = "" + // sniffActiveExperiments does minimal parsing of the given body for // "terraform" blocks with "experiments" attributes, returning the // experiments found. @@ -126,17 +136,19 @@ func decodeExperimentsAttr(attr *hcl.Attribute) (experiments.Set, hcl.Diagnostic // No error at all means it's valid and current. ret.Add(exp) - // However, experimental features are subject to breaking changes - // in future releases, so we'll warn about them to help make sure - // folks aren't inadvertently using them in places where that'd be - // inappropriate, particularly if the experiment is active in a - // shared module they depend on. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: fmt.Sprintf("Experimental feature %q is active", exp.Keyword()), - Detail: "Experimental features are subject to breaking changes in future minor or patch releases, based on feedback.\n\nIf you have feedback on the design of this feature, please open a GitHub issue to discuss it.", - Subject: expr.Range().Ptr(), - }) + if disableExperimentWarnings == "" { + // However, experimental features are subject to breaking changes + // in future releases, so we'll warn about them to help make sure + // folks aren't inadvertently using them in places where that'd be + // inappropriate, particularly if the experiment is active in a + // shared module they depend on. 
+ diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: fmt.Sprintf("Experimental feature %q is active", exp.Keyword()), + Detail: "Experimental features are subject to breaking changes in future minor or patch releases, based on feedback.\n\nIf you have feedback on the design of this feature, please open a GitHub issue to discuss it.", + Subject: expr.Range().Ptr(), + }) + } default: // This should never happen, because GetCurrent is not documented @@ -186,7 +198,7 @@ func checkModuleExperiments(m *Module) hcl.Diagnostics { if !m.ActiveExperiments.Has(experiments.ModuleVariableOptionalAttrs) { for _, v := range m.Variables { - if typeConstraintHasOptionalAttrs(v.Type) { + if typeConstraintHasOptionalAttrs(v.ConstraintType) { diags = diags.Append(&hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Optional object type attributes are experimental", diff --git a/internal/terraform/configs/module.go b/internal/terraform/configs/module.go index 5ca64f48..abe880d8 100644 --- a/internal/terraform/configs/module.go +++ b/internal/terraform/configs/module.go @@ -29,6 +29,7 @@ type Module struct { ActiveExperiments experiments.Set Backend *Backend + CloudConfig *CloudConfig ProviderConfigs map[string]*Provider ProviderRequirements *RequiredProviders ProviderLocalNames map[addrs.Provider]string @@ -42,6 +43,8 @@ type Module struct { ManagedResources map[string]*Resource DataResources map[string]*Resource + + Moved []*Moved } // File describes the contents of a single configuration file. 
@@ -61,6 +64,7 @@ type File struct { ActiveExperiments experiments.Set Backends []*Backend + CloudConfigs []*CloudConfig ProviderConfigs []*Provider ProviderMetas []*ProviderMeta RequiredProviders []*RequiredProviders @@ -73,6 +77,8 @@ type File struct { ManagedResources []*Resource DataResources []*Resource + + Moved []*Moved } // NewModule takes a list of primary files and a list of override files and @@ -186,6 +192,29 @@ func (m *Module) appendFile(file *File) hcl.Diagnostics { m.Backend = b } + for _, c := range file.CloudConfigs { + if m.CloudConfig != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate Terraform Cloud configurations", + Detail: fmt.Sprintf("A module may have only one 'cloud' block configuring Terraform Cloud. Terraform Cloud was previously configured at %s.", m.CloudConfig.DeclRange), + Subject: &c.DeclRange, + }) + continue + } + + m.CloudConfig = c + } + + if m.Backend != nil && m.CloudConfig != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Both a backend and Terraform Cloud configuration are present", + Detail: fmt.Sprintf("A module may declare either one 'cloud' block configuring Terraform Cloud OR one 'backend' block configuring a state backend. Terraform Cloud is configured at %s; a backend is configured at %s. Remove the backend block to configure Terraform Cloud.", m.CloudConfig.DeclRange, m.Backend.DeclRange), + Subject: &m.Backend.DeclRange, + }) + } + for _, pc := range file.ProviderConfigs { key := pc.moduleUniqueKey() if existing, exists := m.ProviderConfigs[key]; exists { @@ -328,6 +357,11 @@ func (m *Module) appendFile(file *File) hcl.Diagnostics { } } + // "Moved" blocks just append, because they are all independent + // of one another at this level. (We handle any references between + // them at runtime.) + m.Moved = append(m.Moved, file.Moved...) 
+ return diags } @@ -345,6 +379,7 @@ func (m *Module) mergeFile(file *File) hcl.Diagnostics { if len(file.Backends) != 0 { switch len(file.Backends) { case 1: + m.CloudConfig = nil // A backend block is mutually exclusive with a cloud one, and overwrites any cloud config m.Backend = file.Backends[0] default: // An override file with multiple backends is still invalid, even @@ -358,6 +393,23 @@ func (m *Module) mergeFile(file *File) hcl.Diagnostics { } } + if len(file.CloudConfigs) != 0 { + switch len(file.CloudConfigs) { + case 1: + m.Backend = nil // A cloud block is mutually exclusive with a backend one, and overwrites any backend + m.CloudConfig = file.CloudConfigs[0] + default: + // An override file with multiple cloud blocks is still invalid, even + // though it can override cloud/backend blocks from _other_ files. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate Terraform Cloud configurations", + Detail: fmt.Sprintf("A module may have only one 'cloud' block configuring Terraform Cloud. Terraform Cloud was previously configured at %s.", file.CloudConfigs[0].DeclRange), + Subject: &file.CloudConfigs[1].DeclRange, + }) + } + } + for _, pc := range file.ProviderConfigs { key := pc.moduleUniqueKey() existing, exists := m.ProviderConfigs[key] @@ -482,6 +534,15 @@ func (m *Module) mergeFile(file *File) hcl.Diagnostics { diags = append(diags, mergeDiags...) 
} + for _, m := range file.Moved { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Cannot override 'moved' blocks", + Detail: "Records of moved objects can appear only in normal files, not in override files.", + Subject: m.DeclRange.Ptr(), + }) + } + return diags } diff --git a/internal/terraform/configs/module_call.go b/internal/terraform/configs/module_call.go index 0733db85..2dffd7df 100644 --- a/internal/terraform/configs/module_call.go +++ b/internal/terraform/configs/module_call.go @@ -3,6 +3,8 @@ package configs import ( "fmt" + "github.com/camptocamp/terraboard/internal/terraform/addrs" + "github.com/camptocamp/terraboard/internal/terraform/getmodules" "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/gohcl" "github.com/hashicorp/hcl/v2/hclsyntax" @@ -12,7 +14,8 @@ import ( type ModuleCall struct { Name string - SourceAddr string + SourceAddr addrs.ModuleSource + SourceAddrRaw string SourceAddrRange hcl.Range SourceSet bool @@ -56,17 +59,75 @@ func decodeModuleBlock(block *hcl.Block, override bool) (*ModuleCall, hcl.Diagno }) } - if attr, exists := content.Attributes["source"]; exists { - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &mc.SourceAddr) - diags = append(diags, valDiags...) - mc.SourceAddrRange = attr.Expr.Range() - mc.SourceSet = true - } - + haveVersionArg := false if attr, exists := content.Attributes["version"]; exists { var versionDiags hcl.Diagnostics mc.Version, versionDiags = decodeVersionConstraint(attr) diags = append(diags, versionDiags...) + haveVersionArg = true + } + + if attr, exists := content.Attributes["source"]; exists { + mc.SourceSet = true + mc.SourceAddrRange = attr.Expr.Range() + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &mc.SourceAddrRaw) + diags = append(diags, valDiags...) 
+ if !valDiags.HasErrors() { + var addr addrs.ModuleSource + var err error + if haveVersionArg { + addr, err = addrs.ParseModuleSourceRegistry(mc.SourceAddrRaw) + } else { + addr, err = addrs.ParseModuleSource(mc.SourceAddrRaw) + } + mc.SourceAddr = addr + if err != nil { + // NOTE: We leave mc.SourceAddr as nil for any situation where the + // source attribute is invalid, so any code which tries to carefully + // use the partial result of a failed config decode must be + // resilient to that. + mc.SourceAddr = nil + + // NOTE: In practice it's actually very unlikely to end up here, + // because our source address parser can turn just about any string + // into some sort of remote package address, and so for most errors + // we'll detect them only during module installation. There are + // still a _few_ purely-syntax errors we can catch at parsing time, + // though, mostly related to remote package sub-paths and local + // paths. + switch err := err.(type) { + case *getmodules.MaybeRelativePathErr: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid module source address", + Detail: fmt.Sprintf( + "Terraform failed to determine your intended installation method for remote module package %q.\n\nIf you intended this as a path relative to the current module, use \"./%s\" instead. The \"./\" prefix indicates that the address is a relative filesystem path.", + err.Addr, err.Addr, + ), + Subject: mc.SourceAddrRange.Ptr(), + }) + default: + if haveVersionArg { + // In this case we'll include some extra context that + // we assumed a registry source address due to the + // version argument. 
+ diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid registry module source address", + Detail: fmt.Sprintf("Failed to parse module registry address: %s.\n\nTerraform assumed that you intended a module registry source address because you also set the argument \"version\", which applies only to registry modules.", err), + Subject: mc.SourceAddrRange.Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid module source address", + Detail: fmt.Sprintf("Failed to parse module source address: %s.", err), + Subject: mc.SourceAddrRange.Ptr(), + }) + } + } + } + } } if attr, exists := content.Attributes["count"]; exists { @@ -162,6 +223,19 @@ func decodeModuleBlock(block *hcl.Block, override bool) (*ModuleCall, hcl.Diagno return mc, diags } +// EntersNewPackage returns true if this call is to an external module, either +// directly via a remote source address or indirectly via a registry source +// address. +// +// Other behaviors in Terraform may treat package crossings as a special +// situation, because that indicates that the caller and callee can change +// independently of one another and thus we should disallow using any features +// where the caller assumes anything about the callee other than its input +// variables, required provider configurations, and output values. +func (mc *ModuleCall) EntersNewPackage() bool { + return moduleSourceAddrEntersNewPackage(mc.SourceAddr) +} + // PassedProviderConfig represents a provider config explicitly passed down to // a child module, possibly giving it a new local address in the process. 
type PassedProviderConfig struct { @@ -200,3 +274,27 @@ var moduleBlockSchema = &hcl.BodySchema{ {Type: "provider", LabelNames: []string{"type"}}, }, } + +func moduleSourceAddrEntersNewPackage(addr addrs.ModuleSource) bool { + switch addr.(type) { + case nil: + // There are only two situations where we should get here: + // - We've been asked about the source address of the root module, + // which is always nil. + // - We've been asked about a ModuleCall that is part of the partial + // result of a failed decode. + // The root module exists outside of all module packages, so we'll + // just return false for that case. For the error case it doesn't + // really matter what we return as long as we don't panic, because + // we only make a best-effort to allow careful inspection of objects + // representing invalid configuration. + return false + case addrs.ModuleSourceLocal: + // Local source addresses are the only address type that remains within + // the same package. + return false + default: + // All other address types enter a new package. 
+ return true + } +} diff --git a/internal/terraform/configs/module_call_test.go b/internal/terraform/configs/module_call_test.go index 79ad89e6..e284f73c 100644 --- a/internal/terraform/configs/module_call_test.go +++ b/internal/terraform/configs/module_call_test.go @@ -4,6 +4,7 @@ import ( "io/ioutil" "testing" + "github.com/camptocamp/terraboard/internal/terraform/addrs" "github.com/go-test/deep" "github.com/hashicorp/hcl/v2" ) @@ -26,9 +27,10 @@ func TestLoadModuleCall(t *testing.T) { gotModules := file.ModuleCalls wantModules := []*ModuleCall{ { - Name: "foo", - SourceAddr: "./foo", - SourceSet: true, + Name: "foo", + SourceAddr: addrs.ModuleSourceLocal("./foo"), + SourceAddrRaw: "./foo", + SourceSet: true, SourceAddrRange: hcl.Range{ Filename: "module-calls.tf", Start: hcl.Pos{Line: 3, Column: 12, Byte: 27}, @@ -41,9 +43,17 @@ func TestLoadModuleCall(t *testing.T) { }, }, { - Name: "bar", - SourceAddr: "hashicorp/bar/aws", - SourceSet: true, + Name: "bar", + SourceAddr: addrs.ModuleSourceRegistry{ + PackageAddr: addrs.ModuleRegistryPackage{ + Host: addrs.DefaultModuleRegistryHost, + Namespace: "hashicorp", + Name: "bar", + TargetSystem: "aws", + }, + }, + SourceAddrRaw: "hashicorp/bar/aws", + SourceSet: true, SourceAddrRange: hcl.Range{ Filename: "module-calls.tf", Start: hcl.Pos{Line: 8, Column: 12, Byte: 113}, @@ -56,9 +66,12 @@ func TestLoadModuleCall(t *testing.T) { }, }, { - Name: "baz", - SourceAddr: "git::https://example.com/", - SourceSet: true, + Name: "baz", + SourceAddr: addrs.ModuleSourceRemote{ + PackageAddr: addrs.ModulePackage("git::https://example.com/"), + }, + SourceAddrRaw: "git::https://example.com/", + SourceSet: true, SourceAddrRange: hcl.Range{ Filename: "module-calls.tf", Start: hcl.Pos{Line: 15, Column: 12, Byte: 193}, @@ -131,3 +144,49 @@ func TestLoadModuleCall(t *testing.T) { t.Error(problem) } } + +func TestModuleSourceAddrEntersNewPackage(t *testing.T) { + tests := []struct { + Addr string + Want bool + }{ + { + "./", + false, + 
}, + { + "../bork", + false, + }, + { + "/absolute/path", + true, + }, + { + "github.com/example/foo", + true, + }, + { + "hashicorp/subnets/cidr", // registry module + true, + }, + { + "registry.terraform.io/hashicorp/subnets/cidr", // registry module + true, + }, + } + + for _, test := range tests { + t.Run(test.Addr, func(t *testing.T) { + addr, err := addrs.ParseModuleSource(test.Addr) + if err != nil { + t.Fatalf("parsing failed for %q: %s", test.Addr, err) + } + + got := moduleSourceAddrEntersNewPackage(addr) + if got != test.Want { + t.Errorf("wrong result for %q\ngot: %#v\nwant: %#v", addr, got, test.Want) + } + }) + } +} diff --git a/internal/terraform/configs/module_merge.go b/internal/terraform/configs/module_merge.go index 0fb242c7..0c72071b 100644 --- a/internal/terraform/configs/module_merge.go +++ b/internal/terraform/configs/module_merge.go @@ -51,10 +51,15 @@ func (v *Variable) merge(ov *Variable) hcl.Diagnostics { } if ov.Type != cty.NilType { v.Type = ov.Type + v.ConstraintType = ov.ConstraintType } if ov.ParsingMode != 0 { v.ParsingMode = ov.ParsingMode } + if ov.NullableSet { + v.Nullable = ov.Nullable + v.NullableSet = ov.NullableSet + } // If the override file overrode type without default or vice-versa then // it may have created an invalid situation, which we'll catch now by @@ -67,7 +72,7 @@ func (v *Variable) merge(ov *Variable) hcl.Diagnostics { // constraint but the converted value cannot. In practice, this situation // should be rare since most of our conversions are interchangable. if v.Default != cty.NilVal { - val, err := convert.Convert(v.Default, v.Type) + val, err := convert.Convert(v.Default, v.ConstraintType) if err != nil { // What exactly we'll say in the error message here depends on whether // it was Default or Type that was overridden here. 
@@ -99,6 +104,16 @@ func (v *Variable) merge(ov *Variable) hcl.Diagnostics { } else { v.Default = val } + + // ensure a null default wasn't merged in when it is not allowed + if !v.Nullable && v.Default.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid default value for variable", + Detail: "A null default value is not valid when nullable=false.", + Subject: &ov.DeclRange, + }) + } } return diags @@ -150,6 +165,7 @@ func (mc *ModuleCall) merge(omc *ModuleCall) hcl.Diagnostics { if omc.SourceSet { mc.SourceAddr = omc.SourceAddr + mc.SourceAddrRaw = omc.SourceAddrRaw mc.SourceAddrRange = omc.SourceAddrRange mc.SourceSet = omc.SourceSet } @@ -226,6 +242,9 @@ func (r *Resource) merge(or *Resource, rps map[string]*RequiredProvider) hcl.Dia if len(or.Managed.IgnoreChanges) != 0 { r.Managed.IgnoreChanges = or.Managed.IgnoreChanges } + if or.Managed.IgnoreAllChanges { + r.Managed.IgnoreAllChanges = true + } if or.Managed.PreventDestroySet { r.Managed.PreventDestroy = or.Managed.PreventDestroy r.Managed.PreventDestroySet = or.Managed.PreventDestroySet diff --git a/internal/terraform/configs/module_merge_test.go b/internal/terraform/configs/module_merge_test.go index 6b044a53..f136cd90 100644 --- a/internal/terraform/configs/module_merge_test.go +++ b/internal/terraform/configs/module_merge_test.go @@ -24,7 +24,10 @@ func TestModuleOverrideVariable(t *testing.T) { Description: "b_override description", DescriptionSet: true, Default: cty.StringVal("b_override"), + Nullable: false, + NullableSet: true, Type: cty.String, + ConstraintType: cty.String, ParsingMode: VariableParseLiteral, DeclRange: hcl.Range{ Filename: "testdata/valid-modules/override-variable/primary.tf", @@ -45,7 +48,10 @@ func TestModuleOverrideVariable(t *testing.T) { Description: "base description", DescriptionSet: true, Default: cty.StringVal("b_override partial"), + Nullable: true, + NullableSet: false, Type: cty.String, + ConstraintType: cty.String, 
ParsingMode: VariableParseLiteral, DeclRange: hcl.Range{ Filename: "testdata/valid-modules/override-variable/primary.tf", @@ -81,8 +87,9 @@ func TestModuleOverrideModule(t *testing.T) { got := mod.ModuleCalls["example"] want := &ModuleCall{ - Name: "example", - SourceAddr: "./example2-a_override", + Name: "example", + SourceAddr: addrs.ModuleSourceLocal("./example2-a_override"), + SourceAddrRaw: "./example2-a_override", SourceAddrRange: hcl.Range{ Filename: "testdata/valid-modules/override-module/a_override.tf", Start: hcl.Pos{ @@ -288,7 +295,7 @@ func TestModuleOverrideResourceFQNs(t *testing.T) { assertNoDiagnostics(t, diags) got := mod.ManagedResources["test_instance.explicit"] - wantProvider := addrs.NewProvider(addrs.DefaultRegistryHost, "bar", "test") + wantProvider := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "bar", "test") wantProviderCfg := &ProviderConfigRef{ Name: "bar-test", NameRange: hcl.Range{ @@ -313,3 +320,13 @@ func TestModuleOverrideResourceFQNs(t *testing.T) { t.Fatalf("wrong result: found provider config ref %s, expected nil", got.ProviderConfigRef) } } + +func TestModuleOverrideIgnoreAllChanges(t *testing.T) { + mod, diags := testModuleFromDir("testdata/valid-modules/override-ignore-changes") + assertNoDiagnostics(t, diags) + + r := mod.ManagedResources["test_instance.foo"] + if !r.Managed.IgnoreAllChanges { + t.Fatalf("wrong result: expected r.Managed.IgnoreAllChanges to be true") + } +} diff --git a/internal/terraform/configs/module_test.go b/internal/terraform/configs/module_test.go index 9db31b63..5f63450d 100644 --- a/internal/terraform/configs/module_test.go +++ b/internal/terraform/configs/module_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/camptocamp/terraboard/internal/terraform/addrs" + "github.com/zclconf/go-cty/cty" ) // TestNewModule_provider_fqns exercises module.gatherProviderLocalNames() @@ -14,7 +15,7 @@ func TestNewModule_provider_local_name(t *testing.T) { t.Fatal(diags.Error()) } - p := 
addrs.NewProvider(addrs.DefaultRegistryHost, "foo", "test") + p := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "foo", "test") if name, exists := mod.ProviderLocalNames[p]; !exists { t.Fatal("provider FQN foo/test not found") } else { @@ -38,7 +39,7 @@ func TestNewModule_provider_local_name(t *testing.T) { // can also look up the "terraform" provider and see that it sources is // allowed to be overridden, even though there is a builtin provider // called "terraform". - p = addrs.NewProvider(addrs.DefaultRegistryHost, "not-builtin", "not-terraform") + p = addrs.NewProvider(addrs.DefaultProviderRegistryHost, "not-builtin", "not-terraform") if name, exists := mod.ProviderLocalNames[p]; !exists { t.Fatal("provider FQN not-builtin/not-terraform not found") } else { @@ -59,8 +60,8 @@ func TestNewModule_resource_providers(t *testing.T) { // the default implied provider and one explicitly using a provider set in // required_providers wantImplicit := addrs.NewDefaultProvider("test") - wantFoo := addrs.NewProvider(addrs.DefaultRegistryHost, "foo", "test") - wantBar := addrs.NewProvider(addrs.DefaultRegistryHost, "bar", "test") + wantFoo := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "foo", "test") + wantBar := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "bar", "test") // root module if !cfg.Module.ManagedResources["test_instance.explicit"].Provider.Equals(wantFoo) { @@ -107,7 +108,7 @@ func TestProviderForLocalConfig(t *testing.T) { } lc := addrs.LocalProviderConfig{LocalName: "foo-test"} got := mod.ProviderForLocalConfig(lc) - want := addrs.NewProvider(addrs.DefaultRegistryHost, "foo", "test") + want := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "foo", "test") if !got.Equals(want) { t.Fatalf("wrong result! 
got %#v, want %#v\n", got, want) } @@ -135,7 +136,7 @@ func TestModule_required_providers_after_resource(t *testing.T) { t.Fatal(diags.Error()) } - want := addrs.NewProvider(addrs.DefaultRegistryHost, "foo", "test") + want := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "foo", "test") req, exists := mod.ProviderRequirements.RequiredProviders["test"] if !exists { @@ -165,7 +166,7 @@ func TestModule_required_provider_overrides(t *testing.T) { } // The foo provider and resource should be unaffected - want := addrs.NewProvider(addrs.DefaultRegistryHost, "acme", "foo") + want := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "acme", "foo") req, exists := mod.ProviderRequirements.RequiredProviders["foo"] if !exists { t.Fatal("no provider requirements found for \"foo\"") @@ -182,7 +183,7 @@ func TestModule_required_provider_overrides(t *testing.T) { } // The bar provider and resource should be using the override config - want = addrs.NewProvider(addrs.DefaultRegistryHost, "blorp", "bar") + want = addrs.NewProvider(addrs.DefaultProviderRegistryHost, "blorp", "bar") req, exists = mod.ProviderRequirements.RequiredProviders["bar"] if !exists { t.Fatal("no provider requirements found for \"bar\"") @@ -223,7 +224,7 @@ func TestModule_implied_provider(t *testing.T) { // The three providers used in the config resources foo := addrs.NewProvider("registry.acme.corp", "acme", "foo") - whatever := addrs.NewProvider(addrs.DefaultRegistryHost, "acme", "something") + whatever := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "acme", "something") bar := addrs.NewDefaultProvider("bar") // Verify that the registry.acme.corp/acme/foo provider is defined in the @@ -289,7 +290,7 @@ func TestImpliedProviderForUnqualifiedType(t *testing.T) { } foo := addrs.NewProvider("registry.acme.corp", "acme", "foo") - whatever := addrs.NewProvider(addrs.DefaultRegistryHost, "acme", "something") + whatever := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "acme", "something") bar 
:= addrs.NewDefaultProvider("bar") tf := addrs.NewBuiltInProvider("terraform") @@ -309,3 +310,105 @@ func TestImpliedProviderForUnqualifiedType(t *testing.T) { } } } + +func TestModule_backend_override(t *testing.T) { + mod, diags := testModuleFromDir("testdata/valid-modules/override-backend") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + gotType := mod.Backend.Type + wantType := "bar" + + if gotType != wantType { + t.Errorf("wrong result for backend type: got %#v, want %#v\n", gotType, wantType) + } + + attrs, _ := mod.Backend.Config.JustAttributes() + + gotAttr, diags := attrs["path"].Expr.Value(nil) + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + wantAttr := cty.StringVal("CHANGED/relative/path/to/terraform.tfstate") + + if !gotAttr.RawEquals(wantAttr) { + t.Errorf("wrong result for backend 'path': got %#v, want %#v\n", gotAttr, wantAttr) + } +} + +// Unlike most other overrides, backend blocks do not require a base configuration in a primary +// configuration file, as an omitted backend there implies the local backend. +func TestModule_backend_override_no_base(t *testing.T) { + mod, diags := testModuleFromDir("testdata/valid-modules/override-backend-no-base") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + if mod.Backend == nil { + t.Errorf("expected module Backend not to be nil") + } +} + +func TestModule_cloud_override_backend(t *testing.T) { + mod, diags := testModuleFromDir("testdata/valid-modules/override-backend-with-cloud") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + if mod.Backend != nil { + t.Errorf("expected module Backend to be nil") + } + + if mod.CloudConfig == nil { + t.Errorf("expected module CloudConfig not to be nil") + } +} + +// Unlike most other overrides, cloud blocks do not require a base configuration in a primary +// configuration file, as an omitted backend there implies the local backend and cloud blocks +// override backends. 
+func TestModule_cloud_override_no_base(t *testing.T) { + mod, diags := testModuleFromDir("testdata/valid-modules/override-cloud-no-base") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + if mod.CloudConfig == nil { + t.Errorf("expected module CloudConfig not to be nil") + } +} + +func TestModule_cloud_override(t *testing.T) { + mod, diags := testModuleFromDir("testdata/valid-modules/override-cloud") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + attrs, _ := mod.CloudConfig.Config.JustAttributes() + + gotAttr, diags := attrs["organization"].Expr.Value(nil) + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + wantAttr := cty.StringVal("CHANGED") + + if !gotAttr.RawEquals(wantAttr) { + t.Errorf("wrong result for Cloud 'organization': got %#v, want %#v\n", gotAttr, wantAttr) + } + + // The override should have completely replaced the cloud block in the primary file, no merging + if attrs["should_not_be_present_with_override"] != nil { + t.Errorf("expected 'should_not_be_present_with_override' attribute to be nil") + } +} + +func TestModule_cloud_duplicate_overrides(t *testing.T) { + _, diags := testModuleFromDir("testdata/invalid-modules/override-cloud-duplicates") + want := `Duplicate Terraform Cloud configurations` + if got := diags.Error(); !strings.Contains(got, want) { + t.Fatalf("expected module error to contain %q\nerror was:\n%s", want, got) + } +} diff --git a/internal/terraform/configs/moved.go b/internal/terraform/configs/moved.go new file mode 100644 index 00000000..82c3f568 --- /dev/null +++ b/internal/terraform/configs/moved.go @@ -0,0 +1,72 @@ +package configs + +import ( + "github.com/camptocamp/terraboard/internal/terraform/addrs" + "github.com/hashicorp/hcl/v2" +) + +type Moved struct { + From *addrs.MoveEndpoint + To *addrs.MoveEndpoint + + DeclRange hcl.Range +} + +func decodeMovedBlock(block *hcl.Block) (*Moved, hcl.Diagnostics) { + var diags hcl.Diagnostics + moved := &Moved{ + DeclRange: block.DefRange, + } + + content, 
moreDiags := block.Body.Content(movedBlockSchema) + diags = append(diags, moreDiags...) + + if attr, exists := content.Attributes["from"]; exists { + from, traversalDiags := hcl.AbsTraversalForExpr(attr.Expr) + diags = append(diags, traversalDiags...) + if !traversalDiags.HasErrors() { + from, fromDiags := addrs.ParseMoveEndpoint(from) + diags = append(diags, fromDiags.ToHCL()...) + moved.From = from + } + } + + if attr, exists := content.Attributes["to"]; exists { + to, traversalDiags := hcl.AbsTraversalForExpr(attr.Expr) + diags = append(diags, traversalDiags...) + if !traversalDiags.HasErrors() { + to, toDiags := addrs.ParseMoveEndpoint(to) + diags = append(diags, toDiags.ToHCL()...) + moved.To = to + } + } + + // we can only move from a module to a module, resource to resource, etc. + if !diags.HasErrors() { + if !moved.From.MightUnifyWith(moved.To) { + // We can catch some obviously-wrong combinations early here, + // but we still have other dynamic validation to do at runtime. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid \"moved\" addresses", + Detail: "The \"from\" and \"to\" addresses must either both refer to resources or both refer to modules.", + Subject: &moved.DeclRange, + }) + } + } + + return moved, diags +} + +var movedBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "from", + Required: true, + }, + { + Name: "to", + Required: true, + }, + }, +} diff --git a/internal/terraform/configs/moved_test.go b/internal/terraform/configs/moved_test.go new file mode 100644 index 00000000..a95c17ac --- /dev/null +++ b/internal/terraform/configs/moved_test.go @@ -0,0 +1,208 @@ +package configs + +import ( + "testing" + + "github.com/camptocamp/terraboard/internal/terraform/addrs" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcltest" +) + +func TestMovedBlock_decode(t *testing.T) { + blockRange := hcl.Range{ + Filename: "mock.tf", + Start: 
hcl.Pos{Line: 3, Column: 12, Byte: 27}, + End: hcl.Pos{Line: 3, Column: 19, Byte: 34}, + } + + foo_expr := hcltest.MockExprTraversalSrc("test_instance.foo") + bar_expr := hcltest.MockExprTraversalSrc("test_instance.bar") + + foo_index_expr := hcltest.MockExprTraversalSrc("test_instance.foo[1]") + bar_index_expr := hcltest.MockExprTraversalSrc("test_instance.bar[\"one\"]") + + mod_foo_expr := hcltest.MockExprTraversalSrc("module.foo") + mod_bar_expr := hcltest.MockExprTraversalSrc("module.bar") + + tests := map[string]struct { + input *hcl.Block + want *Moved + err string + }{ + "success": { + &hcl.Block{ + Type: "moved", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "from": { + Name: "from", + Expr: foo_expr, + }, + "to": { + Name: "to", + Expr: bar_expr, + }, + }, + }), + DefRange: blockRange, + }, + &Moved{ + From: mustMoveEndpointFromExpr(foo_expr), + To: mustMoveEndpointFromExpr(bar_expr), + DeclRange: blockRange, + }, + ``, + }, + "indexed resources": { + &hcl.Block{ + Type: "moved", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "from": { + Name: "from", + Expr: foo_index_expr, + }, + "to": { + Name: "to", + Expr: bar_index_expr, + }, + }, + }), + DefRange: blockRange, + }, + &Moved{ + From: mustMoveEndpointFromExpr(foo_index_expr), + To: mustMoveEndpointFromExpr(bar_index_expr), + DeclRange: blockRange, + }, + ``, + }, + "modules": { + &hcl.Block{ + Type: "moved", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "from": { + Name: "from", + Expr: mod_foo_expr, + }, + "to": { + Name: "to", + Expr: mod_bar_expr, + }, + }, + }), + DefRange: blockRange, + }, + &Moved{ + From: mustMoveEndpointFromExpr(mod_foo_expr), + To: mustMoveEndpointFromExpr(mod_bar_expr), + DeclRange: blockRange, + }, + ``, + }, + "error: missing argument": { + &hcl.Block{ + Type: "moved", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "from": { + Name: "from", + Expr: foo_expr, + 
}, + }, + }), + DefRange: blockRange, + }, + &Moved{ + From: mustMoveEndpointFromExpr(foo_expr), + DeclRange: blockRange, + }, + "Missing required argument", + }, + "error: type mismatch": { + &hcl.Block{ + Type: "moved", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "to": { + Name: "to", + Expr: foo_expr, + }, + "from": { + Name: "from", + Expr: mod_foo_expr, + }, + }, + }), + DefRange: blockRange, + }, + &Moved{ + To: mustMoveEndpointFromExpr(foo_expr), + From: mustMoveEndpointFromExpr(mod_foo_expr), + DeclRange: blockRange, + }, + "Invalid \"moved\" addresses", + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + got, diags := decodeMovedBlock(test.input) + + if diags.HasErrors() { + if test.err == "" { + t.Fatalf("unexpected error: %s", diags.Errs()) + } + if gotErr := diags[0].Summary; gotErr != test.err { + t.Errorf("wrong error, got %q, want %q", gotErr, test.err) + } + } else if test.err != "" { + t.Fatal("expected error") + } + + if !cmp.Equal(got, test.want, cmp.AllowUnexported(addrs.MoveEndpoint{})) { + t.Fatalf("wrong result: %s", cmp.Diff(got, test.want)) + } + }) + } +} + +func TestMovedBlock_inModule(t *testing.T) { + parser := NewParser(nil) + mod, diags := parser.LoadConfigDir("testdata/valid-modules/moved-blocks") + if diags.HasErrors() { + t.Errorf("unexpected error: %s", diags.Error()) + } + + var gotPairs [][2]string + for _, mc := range mod.Moved { + gotPairs = append(gotPairs, [2]string{mc.From.String(), mc.To.String()}) + } + wantPairs := [][2]string{ + {`test.foo`, `test.bar`}, + {`test.foo`, `test.bar["bloop"]`}, + {`module.a`, `module.b`}, + {`module.a`, `module.a["foo"]`}, + {`test.foo`, `module.a.test.foo`}, + {`data.test.foo`, `data.test.bar`}, + } + if diff := cmp.Diff(wantPairs, gotPairs); diff != "" { + t.Errorf("wrong addresses\n%s", diff) + } +} + +func mustMoveEndpointFromExpr(expr hcl.Expression) *addrs.MoveEndpoint { + traversal, hcldiags := hcl.AbsTraversalForExpr(expr) 
+ if hcldiags.HasErrors() { + panic(hcldiags.Errs()) + } + + ep, diags := addrs.ParseMoveEndpoint(traversal) + if diags.HasErrors() { + panic(diags.Err()) + } + + return ep +} diff --git a/internal/terraform/configs/named_values.go b/internal/terraform/configs/named_values.go index 61b04aeb..2002860a 100644 --- a/internal/terraform/configs/named_values.go +++ b/internal/terraform/configs/named_values.go @@ -2,7 +2,6 @@ package configs import ( "fmt" - "unicode" "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/gohcl" @@ -22,14 +21,26 @@ type Variable struct { Name string Description string Default cty.Value - Type cty.Type + + // Type is the concrete type of the variable value. + Type cty.Type + // ConstraintType is used for decoding and type conversions, and may + // contain nested ObjectWithOptionalAttr types. + ConstraintType cty.Type + ParsingMode VariableParsingMode - Validations []*VariableValidation + Validations []*CheckRule Sensitive bool DescriptionSet bool SensitiveSet bool + // Nullable indicates that null is a valid value for this variable. Setting + // Nullable to false means that the module can expect this variable to + // never be null. + Nullable bool + NullableSet bool + DeclRange hcl.Range } @@ -45,6 +56,7 @@ func decodeVariableBlock(block *hcl.Block, override bool) (*Variable, hcl.Diagno // or not they are set when we merge. if !override { v.Type = cty.DynamicPseudoType + v.ConstraintType = cty.DynamicPseudoType v.ParsingMode = VariableParseLiteral } @@ -92,7 +104,8 @@ func decodeVariableBlock(block *hcl.Block, override bool) (*Variable, hcl.Diagno if attr, exists := content.Attributes["type"]; exists { ty, parseMode, tyDiags := decodeVariableType(attr.Expr) diags = append(diags, tyDiags...) 
- v.Type = ty + v.ConstraintType = ty + v.Type = ty.WithoutOptionalAttributesDeep() v.ParsingMode = parseMode } @@ -102,6 +115,16 @@ func decodeVariableBlock(block *hcl.Block, override bool) (*Variable, hcl.Diagno v.SensitiveSet = true } + if attr, exists := content.Attributes["nullable"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &v.Nullable) + diags = append(diags, valDiags...) + v.NullableSet = true + } else { + // The current default is true, which is subject to change in a future + // language edition. + v.Nullable = true + } + if attr, exists := content.Attributes["default"]; exists { val, valDiags := attr.Expr.Value(nil) diags = append(diags, valDiags...) @@ -112,9 +135,9 @@ func decodeVariableBlock(block *hcl.Block, override bool) (*Variable, hcl.Diagno // attribute above. // However, we can't do this if we're in an override file where // the type might not be set; we'll catch that during merge. - if v.Type != cty.NilType { + if v.ConstraintType != cty.NilType { var err error - val, err = convert.Convert(val, v.Type) + val, err = convert.Convert(val, v.ConstraintType) if err != nil { diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, @@ -126,6 +149,15 @@ func decodeVariableBlock(block *hcl.Block, override bool) (*Variable, hcl.Diagno } } + if !v.Nullable && val.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid default value for variable", + Detail: "A null default value is not valid when nullable=false.", + Subject: attr.Expr.Range().Ptr(), + }) + } + v.Default = val } @@ -275,53 +307,12 @@ func (m VariableParsingMode) Parse(name, value string) (cty.Value, hcl.Diagnosti } } -// VariableValidation represents a configuration-defined validation rule -// for a particular input variable, given as a "validation" block inside -// a "variable" block. 
-type VariableValidation struct { - // Condition is an expression that refers to the variable being tested - // and contains no other references. The expression must return true - // to indicate that the value is valid or false to indicate that it is - // invalid. If the expression produces an error, that's considered a bug - // in the module defining the validation rule, not an error in the caller. - Condition hcl.Expression - - // ErrorMessage is one or more full sentences, which would need to be in - // English for consistency with the rest of the error message output but - // can in practice be in any language as long as it ends with a period. - // The message should describe what is required for the condition to return - // true in a way that would make sense to a caller of the module. - ErrorMessage string - - DeclRange hcl.Range -} - -func decodeVariableValidationBlock(varName string, block *hcl.Block, override bool) (*VariableValidation, hcl.Diagnostics) { - var diags hcl.Diagnostics - vv := &VariableValidation{ - DeclRange: block.DefRange, - } - - if override { - // For now we'll just forbid overriding validation blocks, to simplify - // the initial design. If we can find a clear use-case for overriding - // validations in override files and there's a way to define it that - // isn't confusing then we could relax this. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Can't override variable validation rules", - Detail: "Variable \"validation\" blocks cannot be used in override files.", - Subject: vv.DeclRange.Ptr(), - }) - return vv, diags - } - - content, moreDiags := block.Body.Content(variableValidationBlockSchema) - diags = append(diags, moreDiags...) 
- - if attr, exists := content.Attributes["condition"]; exists { - vv.Condition = attr.Expr - +// decodeVariableValidationBlock is a wrapper around decodeCheckRuleBlock +// that imposes the additional rule that the condition expression can refer +// only to an input variable of the given name. +func decodeVariableValidationBlock(varName string, block *hcl.Block, override bool) (*CheckRule, hcl.Diagnostics) { + vv, diags := decodeCheckRuleBlock(block, override) + if vv.Condition != nil { // The validation condition can only refer to the variable itself, // to ensure that the variable declaration can't create additional // edges in the dependency graph. @@ -349,83 +340,39 @@ func decodeVariableValidationBlock(varName string, block *hcl.Block, override bo Severity: hcl.DiagError, Summary: "Invalid variable validation condition", Detail: fmt.Sprintf("The condition for variable %q must refer to var.%s in order to test incoming values.", varName, varName), - Subject: attr.Expr.Range().Ptr(), + Subject: vv.Condition.Range().Ptr(), }) } } - if attr, exists := content.Attributes["error_message"]; exists { - moreDiags := gohcl.DecodeExpression(attr.Expr, nil, &vv.ErrorMessage) - diags = append(diags, moreDiags...) - if !moreDiags.HasErrors() { - const errSummary = "Invalid validation error message" - switch { - case vv.ErrorMessage == "": - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: errSummary, - Detail: "An empty string is not a valid nor useful error message.", - Subject: attr.Expr.Range().Ptr(), - }) - case !looksLikeSentences(vv.ErrorMessage): - // Because we're going to include this string verbatim as part - // of a bigger error message written in our usual style in - // English, we'll require the given error message to conform - // to that. We might relax this in future if e.g. 
we start - // presenting these error messages in a different way, or if - // Terraform starts supporting producing error messages in - // other human languages, etc. - // For pragmatism we also allow sentences ending with - // exclamation points, but we don't mention it explicitly here - // because that's not really consistent with the Terraform UI - // writing style. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: errSummary, - Detail: "The validation error message must be at least one full sentence starting with an uppercase letter and ending with a period or question mark.\n\nYour given message will be included as part of a larger Terraform error message, written as English prose. For broadly-shared modules we suggest using a similar writing style so that the overall result will be consistent.", - Subject: attr.Expr.Range().Ptr(), - }) + if vv.ErrorMessage != nil { + // The same applies to the validation error message, except that + // references are not required. A string literal is a valid error + // message. + goodRefs := 0 + for _, traversal := range vv.ErrorMessage.Variables() { + ref, moreDiags := addrs.ParseRef(traversal) + if !moreDiags.HasErrors() { + if addr, ok := ref.Subject.(addrs.InputVariable); ok { + if addr.Name == varName { + goodRefs++ + continue // Reference is valid + } + } } + // If we fall out here then the reference is invalid. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference in variable validation", + Detail: fmt.Sprintf("The error message for variable %q can only refer to the variable itself, using var.%s.", varName, varName), + Subject: traversal.SourceRange().Ptr(), + }) } } return vv, diags } -// looksLikeSentence is a simple heuristic that encourages writing error -// messages that will be presentable when included as part of a larger -// Terraform error diagnostic whose other text is written in the Terraform -// UI writing style. 
-// -// This is intentionally not a very strong validation since we're assuming -// that module authors want to write good messages and might just need a nudge -// about Terraform's specific style, rather than that they are going to try -// to work around these rules to write a lower-quality message. -func looksLikeSentences(s string) bool { - if len(s) < 1 { - return false - } - runes := []rune(s) // HCL guarantees that all strings are valid UTF-8 - first := runes[0] - last := runes[len(runes)-1] - - // If the first rune is a letter then it must be an uppercase letter. - // (This will only see the first rune in a multi-rune combining sequence, - // but the first rune is generally the letter if any are, and if not then - // we'll just ignore it because we're primarily expecting English messages - // right now anyway, for consistency with all of Terraform's other output.) - if unicode.IsLetter(first) && !unicode.IsUpper(first) { - return false - } - - // The string must be at least one full sentence, which implies having - // sentence-ending punctuation. - // (This assumes that if a sentence ends with quotes then the period - // will be outside the quotes, which is consistent with Terraform's UI - // writing style.) - return last == '.' || last == '?' || last == '!' -} - // Output represents an "output" block in a module or file. type Output struct { Name string @@ -434,6 +381,8 @@ type Output struct { DependsOn []hcl.Traversal Sensitive bool + Preconditions []*CheckRule + DescriptionSet bool SensitiveSet bool @@ -487,6 +436,26 @@ func decodeOutputBlock(block *hcl.Block, override bool) (*Output, hcl.Diagnostic o.DependsOn = append(o.DependsOn, deps...) } + for _, block := range content.Blocks { + switch block.Type { + case "precondition": + cr, moreDiags := decodeCheckRuleBlock(block, override) + diags = append(diags, moreDiags...) 
+ o.Preconditions = append(o.Preconditions, cr) + case "postcondition": + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Postconditions are not allowed", + Detail: "Output values can only have preconditions, not postconditions.", + Subject: block.TypeRange.Ptr(), + }) + default: + // The cases above should be exhaustive for all block types + // defined in the block type schema, so this shouldn't happen. + panic(fmt.Sprintf("unexpected lifecycle sub-block type %q", block.Type)) + } + } + return o, diags } @@ -548,23 +517,13 @@ var variableBlockSchema = &hcl.BodySchema{ { Name: "sensitive", }, - }, - Blocks: []hcl.BlockHeaderSchema{ { - Type: "validation", + Name: "nullable", }, }, -} - -var variableValidationBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "condition", - Required: true, - }, + Blocks: []hcl.BlockHeaderSchema{ { - Name: "error_message", - Required: true, + Type: "validation", }, }, } @@ -585,4 +544,8 @@ var outputBlockSchema = &hcl.BodySchema{ Name: "sensitive", }, }, + Blocks: []hcl.BlockHeaderSchema{ + {Type: "precondition"}, + {Type: "postcondition"}, + }, } diff --git a/internal/terraform/configs/named_values_test.go b/internal/terraform/configs/named_values_test.go deleted file mode 100644 index 3a44438a..00000000 --- a/internal/terraform/configs/named_values_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package configs - -import ( - "testing" -) - -func Test_looksLikeSentences(t *testing.T) { - tests := map[string]struct { - args string - want bool - }{ - "empty sentence": { - args: "", - want: false, - }, - "valid sentence": { - args: "A valid sentence.", - want: true, - }, - "valid sentence with an accent": { - args: `A Valid sentence with an accent "é".`, - want: true, - }, - } - - for name, tt := range tests { - t.Run(name, func(t *testing.T) { - if got := looksLikeSentences(tt.args); got != tt.want { - t.Errorf("looksLikeSentences() = %v, want %v", got, tt.want) - } - }) - } -} diff 
--git a/internal/terraform/configs/parser_config.go b/internal/terraform/configs/parser_config.go index ec66b4cc..0281339c 100644 --- a/internal/terraform/configs/parser_config.go +++ b/internal/terraform/configs/parser_config.go @@ -72,6 +72,13 @@ func (p *Parser) loadConfigFile(path string, override bool) (*File, hcl.Diagnost file.Backends = append(file.Backends, backendCfg) } + case "cloud": + cloudCfg, cfgDiags := decodeCloudBlock(innerBlock) + diags = append(diags, cfgDiags...) + if cloudCfg != nil { + file.CloudConfigs = append(file.CloudConfigs, cloudCfg) + } + case "required_providers": reqs, reqsDiags := decodeRequiredProvidersBlock(innerBlock) diags = append(diags, reqsDiags...) @@ -135,19 +142,26 @@ func (p *Parser) loadConfigFile(path string, override bool) (*File, hcl.Diagnost } case "resource": - cfg, cfgDiags := decodeResourceBlock(block) + cfg, cfgDiags := decodeResourceBlock(block, override) diags = append(diags, cfgDiags...) if cfg != nil { file.ManagedResources = append(file.ManagedResources, cfg) } case "data": - cfg, cfgDiags := decodeDataBlock(block) + cfg, cfgDiags := decodeDataBlock(block, override) diags = append(diags, cfgDiags...) if cfg != nil { file.DataResources = append(file.DataResources, cfg) } + case "moved": + cfg, cfgDiags := decodeMovedBlock(block) + diags = append(diags, cfgDiags...) + if cfg != nil { + file.Moved = append(file.Moved, cfg) + } + default: // Should never happen because the above cases should be exhaustive // for all block type names in our schema. 
@@ -235,6 +249,9 @@ var configFileSchema = &hcl.BodySchema{ Type: "data", LabelNames: []string{"type", "name"}, }, + { + Type: "moved", + }, }, } @@ -251,6 +268,9 @@ var terraformBlockSchema = &hcl.BodySchema{ Type: "backend", LabelNames: []string{"type"}, }, + { + Type: "cloud", + }, { Type: "required_providers", }, diff --git a/internal/terraform/configs/parser_config_dir_test.go b/internal/terraform/configs/parser_config_dir_test.go index c3c284aa..da2f23c9 100644 --- a/internal/terraform/configs/parser_config_dir_test.go +++ b/internal/terraform/configs/parser_config_dir_test.go @@ -5,6 +5,8 @@ import ( "io/ioutil" "path/filepath" "testing" + + "github.com/hashicorp/hcl/v2" ) // TestParseLoadConfigDirSuccess is a simple test that just verifies that @@ -31,6 +33,33 @@ func TestParserLoadConfigDirSuccess(t *testing.T) { path := filepath.Join("testdata/valid-modules", name) mod, diags := parser.LoadConfigDir(path) + if len(diags) != 0 && len(mod.ActiveExperiments) != 0 { + // As a special case to reduce churn while we're working + // through experimental features, we'll ignore the warning + // that an experimental feature is active if the module + // intentionally opted in to that feature. + // If you want to explicitly test for the feature warning + // to be generated, consider using testdata/warning-files + // instead. 
+ filterDiags := make(hcl.Diagnostics, 0, len(diags)) + for _, diag := range diags { + if diag.Severity != hcl.DiagWarning { + continue + } + match := false + for exp := range mod.ActiveExperiments { + allowedSummary := fmt.Sprintf("Experimental feature %q is active", exp.Keyword()) + if diag.Summary == allowedSummary { + match = true + break + } + } + if !match { + filterDiags = append(filterDiags, diag) + } + } + diags = filterDiags + } if len(diags) != 0 { t.Errorf("unexpected diagnostics") for _, diag := range diags { diff --git a/internal/terraform/configs/parser_config_test.go b/internal/terraform/configs/parser_config_test.go index 7832914c..e1244ade 100644 --- a/internal/terraform/configs/parser_config_test.go +++ b/internal/terraform/configs/parser_config_test.go @@ -97,7 +97,7 @@ func TestParserLoadConfigFileFailureMessages(t *testing.T) { { "invalid-files/data-resource-lifecycle.tf", hcl.DiagError, - "Unsupported lifecycle block", + "Invalid data resource lifecycle argument", }, { "invalid-files/variable-type-unknown.tf", diff --git a/internal/terraform/configs/parser_test.go b/internal/terraform/configs/parser_test.go index cb223928..4eb558d1 100644 --- a/internal/terraform/configs/parser_test.go +++ b/internal/terraform/configs/parser_test.go @@ -87,9 +87,9 @@ func testNestedModuleConfigFromDir(t *testing.T, path string) (*Config, hcl.Diag // Build a full path by walking up the module tree, prepending each // source address path until we hit the root - paths := []string{req.SourceAddr} + paths := []string{req.SourceAddr.String()} for config := req.Parent; config != nil && config.Parent != nil; config = config.Parent { - paths = append([]string{config.SourceAddr}, paths...) + paths = append([]string{config.SourceAddr.String()}, paths...) } paths = append([]string{path}, paths...) sourcePath := filepath.Join(paths...) 
diff --git a/internal/terraform/configs/provider.go b/internal/terraform/configs/provider.go index 0d9cc89d..9d38532a 100644 --- a/internal/terraform/configs/provider.go +++ b/internal/terraform/configs/provider.go @@ -46,6 +46,11 @@ func decodeProviderBlock(block *hcl.Block) (*Provider, hcl.Diagnostics) { name := block.Labels[0] nameDiags := checkProviderNameNormalized(name, block.DefRange) diags = append(diags, nameDiags...) + if nameDiags.HasErrors() { + // If the name is invalid then we mustn't produce a result because + // downstreams could try to use it as a provider type and then crash. + return nil, diags + } provider := &Provider{ Name: name, diff --git a/internal/terraform/configs/provider_requirements.go b/internal/terraform/configs/provider_requirements.go index 4731d8cc..4bab6791 100644 --- a/internal/terraform/configs/provider_requirements.go +++ b/internal/terraform/configs/provider_requirements.go @@ -86,6 +86,7 @@ func decodeRequiredProvidersBlock(block *hcl.Block) (*RequiredProviders, hcl.Dia continue } + LOOP: for _, kv := range kvs { key, keyDiags := kv.Key.Value(nil) if keyDiags.HasErrors() { @@ -213,7 +214,7 @@ func decodeRequiredProvidersBlock(block *hcl.Block) (*RequiredProviders, hcl.Dia Detail: `required_providers objects can only contain "version", "source" and "configuration_aliases" attributes. 
To configure a provider, use a "provider" block.`, Subject: kv.Key.Range().Ptr(), }) - break + break LOOP } } diff --git a/internal/terraform/configs/provider_requirements_test.go b/internal/terraform/configs/provider_requirements_test.go index a994c9f3..1eb62e56 100644 --- a/internal/terraform/configs/provider_requirements_test.go +++ b/internal/terraform/configs/provider_requirements_test.go @@ -94,7 +94,7 @@ func TestDecodeRequiredProvidersBlock(t *testing.T) { "my-test": { Name: "my-test", Source: "mycloud/test", - Type: addrs.NewProvider(addrs.DefaultRegistryHost, "mycloud", "test"), + Type: addrs.NewProvider(addrs.DefaultProviderRegistryHost, "mycloud", "test"), Requirement: testVC("2.0.0"), DeclRange: mockRange, }, @@ -133,7 +133,7 @@ func TestDecodeRequiredProvidersBlock(t *testing.T) { "my-test": { Name: "my-test", Source: "mycloud/test", - Type: addrs.NewProvider(addrs.DefaultRegistryHost, "mycloud", "test"), + Type: addrs.NewProvider(addrs.DefaultProviderRegistryHost, "mycloud", "test"), Requirement: testVC("2.0.0"), DeclRange: mockRange, }, diff --git a/internal/terraform/configs/provider_validation.go b/internal/terraform/configs/provider_validation.go index a7620121..ea3e0d54 100644 --- a/internal/terraform/configs/provider_validation.go +++ b/internal/terraform/configs/provider_validation.go @@ -2,6 +2,7 @@ package configs import ( "fmt" + "sort" "strings" "github.com/camptocamp/terraboard/internal/terraform/addrs" @@ -20,20 +21,33 @@ import ( // passed in through the module call. // // The call argument is the ModuleCall for the provided Config cfg. The -// noProviderConfig argument is passed down the call stack, indicating that the -// module call, or a parent module call, has used a feature that precludes -// providers from being configured at all within the module. 
-func validateProviderConfigs(parentCall *ModuleCall, cfg *Config, noProviderConfig bool) (diags hcl.Diagnostics) { +// noProviderConfigRange argument is passed down the call stack, indicating +// that the module call, or a parent module call, has used a feature (at the +// specified source location) that precludes providers from being configured at +// all within the module. +func validateProviderConfigs(parentCall *ModuleCall, cfg *Config, noProviderConfigRange *hcl.Range) (diags hcl.Diagnostics) { mod := cfg.Module for name, child := range cfg.Children { mc := mod.ModuleCalls[name] - + childNoProviderConfigRange := noProviderConfigRange // if the module call has any of count, for_each or depends_on, // providers are prohibited from being configured in this module, or // any module beneath this module. - nope := noProviderConfig || mc.Count != nil || mc.ForEach != nil || mc.DependsOn != nil - diags = append(diags, validateProviderConfigs(mc, child, nope)...) + switch { + case mc.Count != nil: + childNoProviderConfigRange = mc.Count.Range().Ptr() + case mc.ForEach != nil: + childNoProviderConfigRange = mc.ForEach.Range().Ptr() + case mc.DependsOn != nil: + if len(mc.DependsOn) > 0 { + childNoProviderConfigRange = mc.DependsOn[0].SourceRange().Ptr() + } else { + // Weird! We'll just use the call itself, then. + childNoProviderConfigRange = mc.DeclRange.Ptr() + } + } + diags = append(diags, validateProviderConfigs(mc, child, childNoProviderConfigRange)...) } // the set of provider configuration names passed into the module, with the @@ -42,11 +56,11 @@ func validateProviderConfigs(parentCall *ModuleCall, cfg *Config, noProviderConf // the set of empty configurations that could be proxy configurations, with // the source range of the empty configuration block. - emptyConfigs := map[string]*hcl.Range{} + emptyConfigs := map[string]hcl.Range{} // the set of provider with a defined configuration, with the source range // of the configuration block declaration. 
- configured := map[string]*hcl.Range{} + configured := map[string]hcl.Range{} // the set of configuration_aliases defined in the required_providers // block, with the fully qualified provider type. @@ -54,26 +68,22 @@ func validateProviderConfigs(parentCall *ModuleCall, cfg *Config, noProviderConf // the set of provider names defined in the required_providers block, and // their provider types. - localNames := map[string]addrs.AbsProviderConfig{} + localNames := map[string]addrs.Provider{} for _, pc := range mod.ProviderConfigs { name := providerName(pc.Name, pc.Alias) // Validate the config against an empty schema to see if it's empty. _, pcConfigDiags := pc.Config.Content(&hcl.BodySchema{}) if pcConfigDiags.HasErrors() || pc.Version.Required != nil { - configured[name] = &pc.DeclRange + configured[name] = pc.DeclRange } else { - emptyConfigs[name] = &pc.DeclRange + emptyConfigs[name] = pc.DeclRange } } if mod.ProviderRequirements != nil { for _, req := range mod.ProviderRequirements.RequiredProviders { - addr := addrs.AbsProviderConfig{ - Module: cfg.Path, - Provider: req.Type, - } - localNames[req.Name] = addr + localNames[req.Name] = req.Type for _, alias := range req.Aliases { addr := addrs.AbsProviderConfig{ Module: cfg.Path, @@ -125,11 +135,15 @@ func validateProviderConfigs(parentCall *ModuleCall, cfg *Config, noProviderConf // configuration. We ignore empty configs, because they will // already produce a warning. 
if !(confOK || localOK) { + defAddr := addrs.NewDefaultProvider(name) diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagWarning, - Summary: fmt.Sprintf("Provider %s is undefined", name), - Detail: fmt.Sprintf("No provider named %s has been declared in %s.\n", name, moduleText) + - fmt.Sprintf("If you wish to refer to the %s provider within the module, add a provider configuration, or an entry in the required_providers block.", name), + Summary: "Reference to undefined provider", + Detail: fmt.Sprintf( + "There is no explicit declaration for local provider name %q in %s, so Terraform is assuming you mean to pass a configuration for provider %q.\n\nTo clarify your intent and silence this warning, add to %s a required_providers entry named %q with source = %q, or a different source address if appropriate.", + name, moduleText, defAddr.ForDisplay(), + parentModuleText, name, defAddr.ForDisplay(), + ), Subject: &passed.InParent.NameRange, }) continue @@ -139,12 +153,16 @@ func validateProviderConfigs(parentCall *ModuleCall, cfg *Config, noProviderConf // there won't be a configuration available at runtime if the // parent module did not pass one in. 
if !cfg.Path.IsRoot() && !(confOK || passedOK) { + defAddr := addrs.NewDefaultProvider(name) diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagWarning, - Summary: fmt.Sprintf("No configuration passed in for provider %s in %s", name, cfg.Path), - Detail: fmt.Sprintf("Provider %s is referenced within %s, but no configuration has been supplied.\n", name, moduleText) + - fmt.Sprintf("Add a provider named %s to the providers map for %s in %s.", name, cfg.Path, parentModuleText), - Subject: &passed.InParent.NameRange, + Summary: "Missing required provider configuration", + Detail: fmt.Sprintf( + "The configuration for %s expects to inherit a configuration for provider %s with local name %q, but %s doesn't pass a configuration under that name.\n\nTo satisfy this requirement, add an entry for %q to the \"providers\" argument in the module %q block.", + moduleText, defAddr.ForDisplay(), name, parentModuleText, + name, parentCall.Name, + ), + Subject: parentCall.DeclRange.Ptr(), }) } } @@ -156,11 +174,20 @@ func validateProviderConfigs(parentCall *ModuleCall, cfg *Config, noProviderConf } // there cannot be any configurations if no provider config is allowed - if len(configured) > 0 && noProviderConfig { + if len(configured) > 0 && noProviderConfigRange != nil { + // We report this from the perspective of the use of count, for_each, + // or depends_on rather than from inside the module, because the + // recipient of this message is more likely to be the author of the + // calling module (trying to use an older module that hasn't been + // updated yet) than of the called module. 
diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, - Summary: fmt.Sprintf("Module %s contains provider configuration", cfg.Path), - Detail: "Providers cannot be configured within modules using count, for_each or depends_on.", + Summary: "Module is incompatible with count, for_each, and depends_on", + Detail: fmt.Sprintf( + "The module at %s is a legacy module which contains its own local provider configurations, and so calls to it may not use the count, for_each, or depends_on arguments.\n\nIf you also control the module %q, consider updating this module to instead expect provider configurations to be passed by its caller.", + cfg.Path, cfg.SourceAddr, + ), + Subject: noProviderConfigRange, }) } @@ -170,8 +197,11 @@ func validateProviderConfigs(parentCall *ModuleCall, cfg *Config, noProviderConf diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Cannot override provider configuration", - Detail: fmt.Sprintf("Provider %s is configured within the module %s and cannot be overridden.", name, cfg.Path), - Subject: &passed.InChild.NameRange, + Detail: fmt.Sprintf( + "The configuration of %s has its own local configuration for %s, and so it cannot accept an overridden configuration provided by %s.", + moduleText, name, parentModuleText, + ), + Subject: &passed.InChild.NameRange, }) } } @@ -188,9 +218,12 @@ func validateProviderConfigs(parentCall *ModuleCall, cfg *Config, noProviderConf diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, - Summary: fmt.Sprintf("No configuration for provider %s", name), - Detail: fmt.Sprintf("Configuration required for %s.\n", providerAddr) + - fmt.Sprintf("Add a provider named %s to the providers map for %s in %s.", name, cfg.Path, parentModuleText), + Summary: "Missing required provider configuration", + Detail: fmt.Sprintf( + "The child module requires an additional configuration for provider %s, with the local name %q.\n\nRefer to the module's documentation to understand the intended 
purpose of this additional provider configuration, and then add an entry for %s in the \"providers\" meta-argument in the module block to choose which provider configuration the module should use for that purpose.", + providerAddr.Provider.ForDisplay(), name, + name, + ), Subject: &parentCall.DeclRange, }) } @@ -214,7 +247,7 @@ func validateProviderConfigs(parentCall *ModuleCall, cfg *Config, noProviderConf localAddr, localName := localNames[name] if localName { - providerAddr = localAddr + providerAddr.Provider = localAddr } aliasAddr, configAlias := configAliases[name] @@ -225,20 +258,30 @@ func validateProviderConfigs(parentCall *ModuleCall, cfg *Config, noProviderConf _, emptyConfig := emptyConfigs[name] if !(localName || configAlias || emptyConfig) { - severity := hcl.DiagError // we still allow default configs, so switch to a warning if the incoming provider is a default if providerAddr.Provider.IsDefault() { - severity = hcl.DiagWarning + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Reference to undefined provider", + Detail: fmt.Sprintf( + "There is no explicit declaration for local provider name %q in %s, so Terraform is assuming you mean to pass a configuration for %q.\n\nIf you also control the child module, add a required_providers entry named %q with the source address %q.", + name, moduleText, providerAddr.Provider.ForDisplay(), + name, providerAddr.Provider.ForDisplay(), + ), + Subject: &passed.InChild.NameRange, + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reference to undefined provider", + Detail: fmt.Sprintf( + "The child module does not declare any provider requirement with the local name %q.\n\nIf you also control the child module, you can add a required_providers entry named %q with the source address %q to accept this provider configuration.", + name, name, providerAddr.Provider.ForDisplay(), + ), + Subject: &passed.InChild.NameRange, + }) } - - diags = 
append(diags, &hcl.Diagnostic{ - Severity: severity, - Summary: fmt.Sprintf("Provider %s is undefined", name), - Detail: fmt.Sprintf("Module %s does not declare a provider named %s.\n", cfg.Path, name) + - fmt.Sprintf("If you wish to specify a provider configuration for the module, add an entry for %s in the required_providers block within the module.", name), - Subject: &passed.InChild.NameRange, - }) } // The provider being passed in must also be of the correct type. @@ -264,40 +307,126 @@ func validateProviderConfigs(parentCall *ModuleCall, cfg *Config, noProviderConf } if !providerAddr.Provider.Equals(parentAddr.Provider) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Invalid type for provider %s", providerAddr), - Detail: fmt.Sprintf("Cannot use configuration from %s for %s. ", parentAddr, providerAddr) + - "The given provider configuration is for a different provider type.", - Subject: &passed.InChild.NameRange, - }) + // If this module declares the same source address for a different + // local name then we'll prefer to suggest changing to match + // the child module's chosen name, assuming that it was the local + // name that was wrong rather than the source address. 
+ var otherLocalName string + for localName, sourceAddr := range localNames { + if sourceAddr.Equals(parentAddr.Provider) { + otherLocalName = localName + break + } + } + + const errSummary = "Provider type mismatch" + if otherLocalName != "" { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: errSummary, + Detail: fmt.Sprintf( + "The assigned configuration is for provider %q, but local name %q in %s represents %q.\n\nTo pass this configuration to the child module, use the local name %q instead.", + parentAddr.Provider.ForDisplay(), passed.InChild.Name, + parentModuleText, providerAddr.Provider.ForDisplay(), + otherLocalName, + ), + Subject: &passed.InChild.NameRange, + }) + } else { + // If there is no declared requirement for the provider the + // caller is trying to pass under any name then we'll instead + // report it as an unsuitable configuration to pass into the + // child module's provider configuration slot. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: errSummary, + Detail: fmt.Sprintf( + "The local name %q in %s represents provider %q, but %q in %s represents %q.\n\nEach provider has its own distinct configuration schema and provider types, so this module's %q can be assigned only a configuration for %s, which is not required by %s.", + passed.InParent, parentModuleText, parentAddr.Provider.ForDisplay(), + passed.InChild, moduleText, providerAddr.Provider.ForDisplay(), + passed.InChild, providerAddr.Provider.ForDisplay(), + moduleText, + ), + Subject: passed.InParent.NameRange.Ptr(), + }) + } } } - // Empty configurations are no longer needed + // Empty configurations are no longer needed. Since the replacement for + // this calls for one entry per provider rather than one entry per + // provider _configuration_, we'll first gather them up by provider + // and then report a single warning for each, whereby we can show a direct + // example of what the replacement should look like. 
+ type ProviderReqSuggestion struct { + SourceAddr addrs.Provider + SourceRanges []hcl.Range + RequiredConfigs []string + AliasCount int + } + providerReqSuggestions := make(map[string]*ProviderReqSuggestion) for name, src := range emptyConfigs { - detail := fmt.Sprintf("Remove the %s provider block from %s.", name, cfg.Path) + providerLocalName := name + if idx := strings.IndexByte(providerLocalName, '.'); idx >= 0 { + providerLocalName = providerLocalName[:idx] + } - isAlias := strings.Contains(name, ".") - _, isConfigAlias := configAliases[name] - _, isLocalName := localNames[name] + sourceAddr, ok := localNames[name] + if !ok { + sourceAddr = addrs.NewDefaultProvider(providerLocalName) + } - if isAlias && !isConfigAlias { - localName := strings.Split(name, ".")[0] - detail = fmt.Sprintf("Remove the %s provider block from %s. Add %s to the list of configuration_aliases for %s in required_providers to define the provider configuration name.", name, cfg.Path, name, localName) + suggestion := providerReqSuggestions[providerLocalName] + if suggestion == nil { + providerReqSuggestions[providerLocalName] = &ProviderReqSuggestion{ + SourceAddr: sourceAddr, + } + suggestion = providerReqSuggestions[providerLocalName] } - if !isAlias && !isLocalName { - // if there is no local name, add a note to include it in the - // required_provider block - detail += fmt.Sprintf("\nTo ensure the correct provider configuration is used, add %s to the required_providers configuration", name) + if providerLocalName != name { + // It's an aliased provider config, then. 
+ suggestion.AliasCount++ } + suggestion.RequiredConfigs = append(suggestion.RequiredConfigs, name) + suggestion.SourceRanges = append(suggestion.SourceRanges, src) + } + for name, suggestion := range providerReqSuggestions { + var buf strings.Builder + + fmt.Fprintf( + &buf, + "Earlier versions of Terraform used empty provider blocks (\"proxy provider configurations\") for child modules to declare their need to be passed a provider configuration by their callers. That approach was ambiguous and is now deprecated.\n\nIf you control this module, you can migrate to the new declaration syntax by removing all of the empty provider %q blocks and then adding or updating an entry like the following to the required_providers block of %s:\n", + name, moduleText, + ) + fmt.Fprintf(&buf, " %s = {\n", name) + fmt.Fprintf(&buf, " source = %q\n", suggestion.SourceAddr.ForDisplay()) + if suggestion.AliasCount > 0 { + // A lexical sort is fine because all of these strings are + // guaranteed to start with the same provider local name, and + // so we're only really sorting by the alias part. + sort.Strings(suggestion.RequiredConfigs) + fmt.Fprintln(&buf, " configuration_aliases = [") + for _, addrStr := range suggestion.RequiredConfigs { + fmt.Fprintf(&buf, " %s,\n", addrStr) + } + fmt.Fprintln(&buf, " ]") + + } + fmt.Fprint(&buf, " }") + + // We're arbitrarily going to just take the one source range that + // sorts earliest here. Multiple should be rare, so this is only to + // ensure that we produce a deterministic result in the edge case. 
+ sort.Slice(suggestion.SourceRanges, func(i, j int) bool { + return suggestion.SourceRanges[i].String() < suggestion.SourceRanges[j].String() + }) diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagWarning, - Summary: "Empty provider configuration blocks are not required", - Detail: detail, - Subject: src, + Summary: "Redundant empty provider block", + Detail: buf.String(), + Subject: suggestion.SourceRanges[0].Ptr(), }) } diff --git a/internal/terraform/configs/resource.go b/internal/terraform/configs/resource.go index db7f9be0..72956b2b 100644 --- a/internal/terraform/configs/resource.go +++ b/internal/terraform/configs/resource.go @@ -6,8 +6,11 @@ import ( "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/gohcl" "github.com/hashicorp/hcl/v2/hclsyntax" + hcljson "github.com/hashicorp/hcl/v2/json" "github.com/camptocamp/terraboard/internal/terraform/addrs" + "github.com/camptocamp/terraboard/internal/terraform/lang" + "github.com/camptocamp/terraboard/internal/terraform/tfdiags" ) // Resource represents a "resource" or "data" block in a module or file. @@ -22,8 +25,13 @@ type Resource struct { ProviderConfigRef *ProviderConfigRef Provider addrs.Provider + Preconditions []*CheckRule + Postconditions []*CheckRule + DependsOn []hcl.Traversal + TriggersReplacement []hcl.Expression + // Managed is populated only for Mode = addrs.ManagedResourceMode, // containing the additional fields that apply to managed resources. // For all other resource modes, this field is nil. @@ -81,7 +89,13 @@ func (r *Resource) ProviderConfigAddr() addrs.LocalProviderConfig { } } -func decodeResourceBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) { +// HasCustomConditions returns true if and only if the resource has at least +// one author-specified custom condition. 
+func (r *Resource) HasCustomConditions() bool { + return len(r.Postconditions) != 0 || len(r.Preconditions) != 0 +} + +func decodeResourceBlock(block *hcl.Block, override bool) (*Resource, hcl.Diagnostics) { var diags hcl.Diagnostics r := &Resource{ Mode: addrs.ManagedResourceMode, @@ -174,6 +188,13 @@ func decodeResourceBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) { r.Managed.PreventDestroySet = true } + if attr, exists := lcContent.Attributes["replace_triggered_by"]; exists { + exprs, hclDiags := decodeReplaceTriggeredBy(attr.Expr) + diags = diags.Extend(hclDiags) + + r.TriggersReplacement = append(r.TriggersReplacement, exprs...) + } + if attr, exists := lcContent.Attributes["ignore_changes"]; exists { // ignore_changes can either be a list of relative traversals @@ -234,7 +255,28 @@ func decodeResourceBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) { } } + } + + for _, block := range lcContent.Blocks { + switch block.Type { + case "precondition", "postcondition": + cr, moreDiags := decodeCheckRuleBlock(block, override) + diags = append(diags, moreDiags...) + moreDiags = cr.validateSelfReferences(block.Type, r.Addr()) + diags = append(diags, moreDiags...) + + switch block.Type { + case "precondition": + r.Preconditions = append(r.Preconditions, cr) + case "postcondition": + r.Postconditions = append(r.Postconditions, cr) + } + default: + // The cases above should be exhaustive for all block types + // defined in the lifecycle schema, so this shouldn't happen. 
+ panic(fmt.Sprintf("unexpected lifecycle sub-block type %q", block.Type)) + } } case "connection": @@ -307,7 +349,7 @@ func decodeResourceBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) { return r, diags } -func decodeDataBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) { +func decodeDataBlock(block *hcl.Block, override bool) (*Resource, hcl.Diagnostics) { var diags hcl.Diagnostics r := &Resource{ Mode: addrs.DataResourceMode, @@ -368,6 +410,7 @@ func decodeDataBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) { } var seenEscapeBlock *hcl.Block + var seenLifecycle *hcl.Block for _, block := range content.Blocks { switch block.Type { @@ -391,21 +434,63 @@ func decodeDataBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) { // will see a blend of both. r.Config = hcl.MergeBodies([]hcl.Body{r.Config, block.Body}) - // The rest of these are just here to reserve block type names for future use. case "lifecycle": - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported lifecycle block", - Detail: "Data resources do not have lifecycle settings, so a lifecycle block is not allowed.", - Subject: &block.DefRange, - }) + if seenLifecycle != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate lifecycle block", + Detail: fmt.Sprintf("This resource already has a lifecycle block at %s.", seenLifecycle.DefRange), + Subject: block.DefRange.Ptr(), + }) + continue + } + seenLifecycle = block + + lcContent, lcDiags := block.Body.Content(resourceLifecycleBlockSchema) + diags = append(diags, lcDiags...) + + // All of the attributes defined for resource lifecycle are for + // managed resources only, so we can emit a common error message + // for any given attributes that HCL accepted. 
+ for name, attr := range lcContent.Attributes { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid data resource lifecycle argument", + Detail: fmt.Sprintf("The lifecycle argument %q is defined only for managed resources (\"resource\" blocks), and is not valid for data resources.", name), + Subject: attr.NameRange.Ptr(), + }) + } + + for _, block := range lcContent.Blocks { + switch block.Type { + case "precondition", "postcondition": + cr, moreDiags := decodeCheckRuleBlock(block, override) + diags = append(diags, moreDiags...) + + moreDiags = cr.validateSelfReferences(block.Type, r.Addr()) + diags = append(diags, moreDiags...) + + switch block.Type { + case "precondition": + r.Preconditions = append(r.Preconditions, cr) + case "postcondition": + r.Postconditions = append(r.Postconditions, cr) + } + default: + // The cases above should be exhaustive for all block types + // defined in the lifecycle schema, so this shouldn't happen. + panic(fmt.Sprintf("unexpected lifecycle sub-block type %q", block.Type)) + } + } default: + // Any other block types are ones we're reserving for future use, + // but don't have any defined meaning today. diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Reserved block type name in data block", Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), - Subject: &block.TypeRange, + Subject: block.TypeRange.Ptr(), }) } } @@ -413,6 +498,115 @@ func decodeDataBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) { return r, diags } +// decodeReplaceTriggeredBy decodes and does basic validation of the +// replace_triggered_by expressions, ensuring they only contains references to +// a single resource, and the only extra variables are count.index or each.key. 
+func decodeReplaceTriggeredBy(expr hcl.Expression) ([]hcl.Expression, hcl.Diagnostics) { + // Since we are manually parsing the replace_triggered_by argument, we + // need to specially handle json configs, in which case the values will + // be json strings rather than hcl. To simplify parsing however we will + // decode the individual list elements, rather than the entire expression. + isJSON := hcljson.IsJSONExpression(expr) + + exprs, diags := hcl.ExprList(expr) + + for i, expr := range exprs { + if isJSON { + // We can abuse the hcl json api and rely on the fact that calling + // Value on a json expression with no EvalContext will return the + // raw string. We can then parse that as normal hcl syntax, and + // continue with the decoding. + v, ds := expr.Value(nil) + diags = diags.Extend(ds) + if diags.HasErrors() { + continue + } + + expr, ds = hclsyntax.ParseExpression([]byte(v.AsString()), "", expr.Range().Start) + diags = diags.Extend(ds) + if diags.HasErrors() { + continue + } + // make sure to swap out the expression we're returning too + exprs[i] = expr + } + + refs, refDiags := lang.ReferencesInExpr(expr) + for _, diag := range refDiags { + severity := hcl.DiagError + if diag.Severity() == tfdiags.Warning { + severity = hcl.DiagWarning + } + + desc := diag.Description() + + diags = append(diags, &hcl.Diagnostic{ + Severity: severity, + Summary: desc.Summary, + Detail: desc.Detail, + Subject: expr.Range().Ptr(), + }) + } + + if refDiags.HasErrors() { + continue + } + + resourceCount := 0 + for _, ref := range refs { + switch sub := ref.Subject.(type) { + case addrs.Resource, addrs.ResourceInstance: + resourceCount++ + + case addrs.ForEachAttr: + if sub.Name != "key" { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid each reference in replace_triggered_by expression", + Detail: "Only each.key may be used in replace_triggered_by.", + Subject: expr.Range().Ptr(), + }) + } + case addrs.CountAttr: + if sub.Name != 
"index" { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count reference in replace_triggered_by expression", + Detail: "Only count.index may be used in replace_triggered_by.", + Subject: expr.Range().Ptr(), + }) + } + default: + // everything else should be simple traversals + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference in replace_triggered_by expression", + Detail: "Only resources, count.index, and each.key may be used in replace_triggered_by.", + Subject: expr.Range().Ptr(), + }) + } + } + + switch { + case resourceCount == 0: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid replace_triggered_by expression", + Detail: "Missing resource reference in replace_triggered_by expression.", + Subject: expr.Range().Ptr(), + }) + case resourceCount > 1: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid replace_triggered_by expression", + Detail: "Multiple resource references in replace_triggered_by expression.", + Subject: expr.Range().Ptr(), + }) + } + } + return exprs, diags +} + type ProviderConfigRef struct { Name string NameRange hcl.Range @@ -551,13 +745,17 @@ var resourceBlockSchema = &hcl.BodySchema{ var dataBlockSchema = &hcl.BodySchema{ Attributes: commonResourceAttributes, Blocks: []hcl.BlockHeaderSchema{ - {Type: "lifecycle"}, // reserved for future use - {Type: "locals"}, // reserved for future use - {Type: "_"}, // meta-argument escaping block + {Type: "lifecycle"}, + {Type: "locals"}, // reserved for future use + {Type: "_"}, // meta-argument escaping block }, } var resourceLifecycleBlockSchema = &hcl.BodySchema{ + // We tell HCL that these elements are all valid for both "resource" + // and "data" lifecycle blocks, but the rules are actually more restrictive + // than that. 
We deal with that after decoding so that we can return + // more specific error messages than HCL would typically return itself. Attributes: []hcl.AttributeSchema{ { Name: "create_before_destroy", @@ -568,5 +766,12 @@ var resourceLifecycleBlockSchema = &hcl.BodySchema{ { Name: "ignore_changes", }, + { + Name: "replace_triggered_by", + }, + }, + Blocks: []hcl.BlockHeaderSchema{ + {Type: "precondition"}, + {Type: "postcondition"}, }, } diff --git a/internal/terraform/configs/testdata/config-build/child_a/child_a.tf b/internal/terraform/configs/testdata/config-build/child_a/child_a.tf index 6c575970..d3eff0b4 100644 --- a/internal/terraform/configs/testdata/config-build/child_a/child_a.tf +++ b/internal/terraform/configs/testdata/config-build/child_a/child_a.tf @@ -1,4 +1,7 @@ module "child_c" { - source = "child_c" + # In the unit test where this fixture is used, we treat the source strings + # as relative paths from the fixture directory rather than as source + # addresses as we would in a real module walker. + source = "./child_c" } diff --git a/internal/terraform/configs/testdata/config-build/child_b/child_b.tf b/internal/terraform/configs/testdata/config-build/child_b/child_b.tf index 1adcb065..d3eff0b4 100644 --- a/internal/terraform/configs/testdata/config-build/child_b/child_b.tf +++ b/internal/terraform/configs/testdata/config-build/child_b/child_b.tf @@ -1,7 +1,7 @@ module "child_c" { # In the unit test where this fixture is used, we treat the source strings - # as absolute paths rather than as source addresses as we would in a real - # module walker. - source = "child_c" + # as relative paths from the fixture directory rather than as source + # addresses as we would in a real module walker. 
+ source = "./child_c" } diff --git a/internal/terraform/configs/testdata/config-build/root.tf b/internal/terraform/configs/testdata/config-build/root.tf index 7b3c4a75..5e23e40a 100644 --- a/internal/terraform/configs/testdata/config-build/root.tf +++ b/internal/terraform/configs/testdata/config-build/root.tf @@ -1,9 +1,9 @@ module "child_a" { - source = "child_a" + source = "./child_a" } module "child_b" { - source = "child_b" + source = "./child_b" } diff --git a/internal/terraform/configs/testdata/config-diagnostics/empty-configs/warnings b/internal/terraform/configs/testdata/config-diagnostics/empty-configs/warnings index dcf6736e..d2fe48dd 100644 --- a/internal/terraform/configs/testdata/config-diagnostics/empty-configs/warnings +++ b/internal/terraform/configs/testdata/config-diagnostics/empty-configs/warnings @@ -1,4 +1,4 @@ -empty-configs/mod/main.tf:10,1-15: Empty provider configuration blocks are not required; Remove the foo provider block from module.mod -empty-configs/mod/main.tf:13,1-15: Empty provider configuration blocks are not required; Remove the foo.bar provider block from module.mod -empty-configs/mod/main.tf:17,1-15: Empty provider configuration blocks are not required; Remove the baz provider block from module.mod.\nTo ensure the correct provider configuration is used, add baz to the required_providers configuration -empty-configs/mod/main.tf:20,1-15: Empty provider configuration blocks are not required; Remove the baz.bing provider block from module.mod. Add baz.bing to the list of configuration_aliases for baz in required_providers to define the provider configuration name +empty-configs/mod/main.tf:10,1-15: Redundant empty provider block; Earlier versions of Terraform used empty provider blocks ("proxy provider configurations") for child modules to declare their need to be passed a provider configuration by their callers. That approach was ambiguous and is now deprecated. 
+If you control this module, you can migrate to the new declaration syntax by removing all of the empty provider "foo" blocks and then adding or updating an entry like the following to the required_providers block of module.mod: +empty-configs/mod/main.tf:17,1-15: Redundant empty provider block; Earlier versions of Terraform used empty provider blocks ("proxy provider configurations") for child modules to declare their need to be passed a provider configuration by their callers. That approach was ambiguous and is now deprecated. +If you control this module, you can migrate to the new declaration syntax by removing all of the empty provider "baz" blocks and then adding or updating an entry like the following to the required_providers block of module.mod: diff --git a/internal/terraform/configs/testdata/config-diagnostics/incorrect-type/errors b/internal/terraform/configs/testdata/config-diagnostics/incorrect-type/errors index 28b21085..97a31fe5 100644 --- a/internal/terraform/configs/testdata/config-diagnostics/incorrect-type/errors +++ b/internal/terraform/configs/testdata/config-diagnostics/incorrect-type/errors @@ -1 +1 @@ -incorrect-type/main.tf:15,5-8: Invalid type for provider module.mod.provider["example.com/vendor/foo"]; Cannot use configuration from provider["registry.terraform.io/hashicorp/foo"] for module.mod.provider["example.com/vendor/foo"] +incorrect-type/main.tf:15,11-14: Provider type mismatch; The local name "foo" in the root module represents provider "hashicorp/foo", but "foo" in module.mod represents "example.com/vendor/foo". 
diff --git a/internal/terraform/configs/testdata/config-diagnostics/incorrect-type/warnings b/internal/terraform/configs/testdata/config-diagnostics/incorrect-type/warnings index a87f1f74..1e35e889 100644 --- a/internal/terraform/configs/testdata/config-diagnostics/incorrect-type/warnings +++ b/internal/terraform/configs/testdata/config-diagnostics/incorrect-type/warnings @@ -1 +1 @@ -incorrect-type/main.tf:16,5-8: Provider baz is undefined; Module module.mod does not declare a provider named baz.\nIf you wish to specify a provider configuration for the module +incorrect-type/main.tf:16,5-8: Reference to undefined provider; There is no explicit declaration for local provider name "baz" in module.mod, so Terraform is assuming you mean to pass a configuration for "hashicorp/baz". diff --git a/internal/terraform/configs/testdata/config-diagnostics/invalid-provider/errors b/internal/terraform/configs/testdata/config-diagnostics/invalid-provider/errors new file mode 100644 index 00000000..359d4760 --- /dev/null +++ b/internal/terraform/configs/testdata/config-diagnostics/invalid-provider/errors @@ -0,0 +1 @@ +main.tf:1,1-20: Invalid provider local name; crash_es is an invalid provider local name diff --git a/internal/terraform/configs/testdata/config-diagnostics/invalid-provider/main.tf b/internal/terraform/configs/testdata/config-diagnostics/invalid-provider/main.tf new file mode 100644 index 00000000..ba846846 --- /dev/null +++ b/internal/terraform/configs/testdata/config-diagnostics/invalid-provider/main.tf @@ -0,0 +1,3 @@ +module "mod" { + source = "./mod" +} diff --git a/internal/terraform/configs/testdata/config-diagnostics/invalid-provider/mod/main.tf b/internal/terraform/configs/testdata/config-diagnostics/invalid-provider/mod/main.tf new file mode 100644 index 00000000..f50ced1f --- /dev/null +++ b/internal/terraform/configs/testdata/config-diagnostics/invalid-provider/mod/main.tf @@ -0,0 +1,2 @@ +provider "crash_es" { +} diff --git 
a/internal/terraform/configs/testdata/config-diagnostics/nested-provider/errors b/internal/terraform/configs/testdata/config-diagnostics/nested-provider/errors index 8f44cac7..df929d81 100644 --- a/internal/terraform/configs/testdata/config-diagnostics/nested-provider/errors +++ b/internal/terraform/configs/testdata/config-diagnostics/nested-provider/errors @@ -1,3 +1 @@ -Module module.child.module.child2 contains provider configuration; Providers cannot be configured within modules using count, for_each or depends_on - - +nested-provider/root.tf:2,11-12: Module is incompatible with count, for_each, and depends_on; The module at module.child.module.child2 is a legacy module which contains its own local provider configurations, and so calls to it may not use the count, for_each, or depends_on arguments. diff --git a/internal/terraform/configs/testdata/config-diagnostics/override-provider/errors b/internal/terraform/configs/testdata/config-diagnostics/override-provider/errors index a8d59d6e..6529dd95 100644 --- a/internal/terraform/configs/testdata/config-diagnostics/override-provider/errors +++ b/internal/terraform/configs/testdata/config-diagnostics/override-provider/errors @@ -1 +1 @@ -override-provider/main.tf:17,5-8: Cannot override provider configuration; Provider bar is configured within the module module.mod and cannot be overridden. +override-provider/main.tf:17,5-8: Cannot override provider configuration; The configuration of module.mod has its own local configuration for bar, and so it cannot accept an overridden configuration provided by the root module. 
diff --git a/internal/terraform/configs/testdata/config-diagnostics/pass-inherited-provider/main.tf b/internal/terraform/configs/testdata/config-diagnostics/pass-inherited-provider/main.tf index 76456399..aa4b7e9f 100644 --- a/internal/terraform/configs/testdata/config-diagnostics/pass-inherited-provider/main.tf +++ b/internal/terraform/configs/testdata/config-diagnostics/pass-inherited-provider/main.tf @@ -5,3 +5,19 @@ provider "test" { module "mod" { source = "./mod" } + +# FIXME: This test is for an awkward interaction that we've preserved for +# compatibility with what was arguably a bug in earlier versions: if a +# child module tries to use an inherited provider configuration explicitly by +# name then Terraform would historically use the wrong provider configuration. +# +# Since we weren't able to address that bug without breaking backward +# compatibility, instead we emit a warning to prompt the author to be explicit, +# passing in the configuration they intend to use. +# +# This case is particularly awkward because a change in the child module +# (previously referring to a provider only implicitly, but now naming it +# explicitly) can cause a required change in _this_ module (the caller), +# even though the author of the child module would've seen no explicit warning +# that they were making a breaking change. Hopefully we can improve on this +# in a future language edition. 
diff --git a/internal/terraform/configs/testdata/config-diagnostics/pass-inherited-provider/warnings b/internal/terraform/configs/testdata/config-diagnostics/pass-inherited-provider/warnings index fee53bc4..692e14ec 100644 --- a/internal/terraform/configs/testdata/config-diagnostics/pass-inherited-provider/warnings +++ b/internal/terraform/configs/testdata/config-diagnostics/pass-inherited-provider/warnings @@ -1 +1 @@ -pass-inherited-provider/mod/main.tf:15,16-20: No configuration passed in for provider test in module.mod; Provider test is referenced within module.mod, but no configuration has been supplied +pass-inherited-provider/main.tf:5,1-13: Missing required provider configuration; The configuration for module.mod expects to inherit a configuration for provider hashicorp/test with local name "test", but the root module doesn't pass a configuration under that name. diff --git a/internal/terraform/configs/testdata/config-diagnostics/required-alias/errors b/internal/terraform/configs/testdata/config-diagnostics/required-alias/errors index a1b944a4..b0102443 100644 --- a/internal/terraform/configs/testdata/config-diagnostics/required-alias/errors +++ b/internal/terraform/configs/testdata/config-diagnostics/required-alias/errors @@ -1 +1 @@ -required-alias/main.tf:1,1-13: No configuration for provider foo.bar; Configuration required for module.mod.provider["registry.terraform.io/hashicorp/foo"].bar +required-alias/main.tf:1,1-13: Missing required provider configuration; The child module requires an additional configuration for provider hashicorp/foo, with the local name "foo.bar". 
diff --git a/internal/terraform/configs/testdata/config-diagnostics/unexpected-provider/warnings b/internal/terraform/configs/testdata/config-diagnostics/unexpected-provider/warnings index 0e41b39a..82be3b70 100644 --- a/internal/terraform/configs/testdata/config-diagnostics/unexpected-provider/warnings +++ b/internal/terraform/configs/testdata/config-diagnostics/unexpected-provider/warnings @@ -1,2 +1 @@ -unexpected-provider/main.tf:13,5-8: Provider foo is undefined; Module module.mod does not declare a provider named foo. - +unexpected-provider/main.tf:13,5-8: Reference to undefined provider; There is no explicit declaration for local provider name "foo" in module.mod, so Terraform is assuming you mean to pass a configuration for "hashicorp/foo". diff --git a/internal/terraform/configs/testdata/config-diagnostics/unknown-root-provider/warnings b/internal/terraform/configs/testdata/config-diagnostics/unknown-root-provider/warnings index 766670d9..4d85a512 100644 --- a/internal/terraform/configs/testdata/config-diagnostics/unknown-root-provider/warnings +++ b/internal/terraform/configs/testdata/config-diagnostics/unknown-root-provider/warnings @@ -1 +1 @@ -unknown-root-provider/main.tf:5,11-14: Provider bar is undefined; No provider named bar has been declared in the root module +unknown-root-provider/main.tf:5,11-14: Reference to undefined provider; There is no explicit declaration for local provider name "bar" in the root module, so Terraform is assuming you mean to pass a configuration for provider "hashicorp/bar". 
diff --git a/internal/terraform/configs/testdata/error-files/module-invalid-registry-source-with-module.tf b/internal/terraform/configs/testdata/error-files/module-invalid-registry-source-with-module.tf new file mode 100644 index 00000000..0029be8f --- /dev/null +++ b/internal/terraform/configs/testdata/error-files/module-invalid-registry-source-with-module.tf @@ -0,0 +1,5 @@ + +module "test" { + source = "---.com/HashiCorp/Consul/aws" # ERROR: Invalid registry module source address + version = "1.0.0" # Makes Terraform assume "source" is a module address +} diff --git a/internal/terraform/configs/testdata/error-files/module-local-source-with-version.tf b/internal/terraform/configs/testdata/error-files/module-local-source-with-version.tf new file mode 100644 index 00000000..f570d65f --- /dev/null +++ b/internal/terraform/configs/testdata/error-files/module-local-source-with-version.tf @@ -0,0 +1,5 @@ + +module "test" { + source = "../boop" # ERROR: Invalid registry module source address + version = "1.0.0" # Makes Terraform assume "source" is a module address +} diff --git a/internal/terraform/configs/testdata/error-files/precondition-postcondition-constant.tf b/internal/terraform/configs/testdata/error-files/precondition-postcondition-constant.tf new file mode 100644 index 00000000..30f44313 --- /dev/null +++ b/internal/terraform/configs/testdata/error-files/precondition-postcondition-constant.tf @@ -0,0 +1,34 @@ +resource "test" "test" { + lifecycle { + precondition { + condition = true # ERROR: Invalid precondition expression + error_message = "Must be true." + } + postcondition { + condition = true # ERROR: Invalid postcondition expression + error_message = "Must be true." + } + } +} + +data "test" "test" { + lifecycle { + precondition { + condition = true # ERROR: Invalid precondition expression + error_message = "Must be true." + } + postcondition { + condition = true # ERROR: Invalid postcondition expression + error_message = "Must be true." 
+ } + } +} + +output "test" { + value = "" + + precondition { + condition = true # ERROR: Invalid precondition expression + error_message = "Must be true." + } +} diff --git a/internal/terraform/configs/testdata/error-files/precondition-postcondition-selfref.tf b/internal/terraform/configs/testdata/error-files/precondition-postcondition-selfref.tf new file mode 100644 index 00000000..5f295c1c --- /dev/null +++ b/internal/terraform/configs/testdata/error-files/precondition-postcondition-selfref.tf @@ -0,0 +1,55 @@ +resource "test" "test" { + lifecycle { + precondition { + condition = test.test.foo # ERROR: Invalid reference in precondition + error_message = "Cannot refer to self." + } + postcondition { + condition = test.test.foo # ERROR: Invalid reference in postcondition + error_message = "Cannot refer to self." + } + } +} + +data "test" "test" { + lifecycle { + precondition { + condition = data.test.test.foo # ERROR: Invalid reference in precondition + error_message = "Cannot refer to self." + } + postcondition { + condition = data.test.test.foo # ERROR: Invalid reference in postcondition + error_message = "Cannot refer to self." + } + } +} + +resource "test" "test_counted" { + count = 1 + + lifecycle { + precondition { + condition = test.test_counted[0].foo # ERROR: Invalid reference in precondition + error_message = "Cannot refer to self." + } + postcondition { + condition = test.test_counted[0].foo # ERROR: Invalid reference in postcondition + error_message = "Cannot refer to self." + } + } +} + +data "test" "test_counted" { + count = 1 + + lifecycle { + precondition { + condition = data.test.test_counted[0].foo # ERROR: Invalid reference in precondition + error_message = "Cannot refer to self." + } + postcondition { + condition = data.test.test_counted[0].foo # ERROR: Invalid reference in postcondition + error_message = "Cannot refer to self." 
+ } + } +} diff --git a/internal/terraform/configs/testdata/invalid-files/data-reserved-lifecycle.tf b/internal/terraform/configs/testdata/invalid-files/data-reserved-lifecycle.tf deleted file mode 100644 index a1e1a1e6..00000000 --- a/internal/terraform/configs/testdata/invalid-files/data-reserved-lifecycle.tf +++ /dev/null @@ -1,3 +0,0 @@ -data "test" "foo" { - lifecycle {} -} diff --git a/internal/terraform/configs/testdata/invalid-files/data-resource-lifecycle.tf b/internal/terraform/configs/testdata/invalid-files/data-resource-lifecycle.tf index b0c34d46..7c1aebe5 100644 --- a/internal/terraform/configs/testdata/invalid-files/data-resource-lifecycle.tf +++ b/internal/terraform/configs/testdata/invalid-files/data-resource-lifecycle.tf @@ -1,5 +1,7 @@ data "example" "example" { lifecycle { - # This block intentionally left blank + # The lifecycle arguments are not valid for data resources: + # only the precondition and postcondition blocks are allowed. + ignore_changes = [] } } diff --git a/internal/terraform/configs/testdata/invalid-files/everything-is-a-plan.tf b/internal/terraform/configs/testdata/invalid-files/everything-is-a-plan.tf new file mode 100644 index 00000000..e66766be --- /dev/null +++ b/internal/terraform/configs/testdata/invalid-files/everything-is-a-plan.tf @@ -0,0 +1,11 @@ +# experiments.EverythingIsAPlan exists but is not registered as an active (or +# concluded) experiment, so this should fail until the experiment "gate" is +# removed. 
+terraform { + experiments = [everything_is_a_plan] +} + +moved { + from = test_instance.foo + to = test_instance.bar +} \ No newline at end of file diff --git a/internal/terraform/configs/testdata/invalid-files/triggered-invalid-each.tf b/internal/terraform/configs/testdata/invalid-files/triggered-invalid-each.tf new file mode 100644 index 00000000..0649ecc9 --- /dev/null +++ b/internal/terraform/configs/testdata/invalid-files/triggered-invalid-each.tf @@ -0,0 +1,7 @@ +resource "test_resource" "a" { + for_each = var.input + lifecycle { + // cannot use each.val + replace_triggered_by = [ test_resource.b[each.val] ] + } +} diff --git a/internal/terraform/configs/testdata/invalid-files/triggered-invalid-expression.tf b/internal/terraform/configs/testdata/invalid-files/triggered-invalid-expression.tf new file mode 100644 index 00000000..8bf8af5b --- /dev/null +++ b/internal/terraform/configs/testdata/invalid-files/triggered-invalid-expression.tf @@ -0,0 +1,6 @@ +resource "test_resource" "a" { + count = 1 + lifecycle { + replace_triggered_by = [ not_a_reference ] + } +} diff --git a/internal/terraform/configs/testdata/invalid-files/variable-validation-bad-msg.tf b/internal/terraform/configs/testdata/invalid-files/variable-validation-bad-msg.tf deleted file mode 100644 index 65df4557..00000000 --- a/internal/terraform/configs/testdata/invalid-files/variable-validation-bad-msg.tf +++ /dev/null @@ -1,6 +0,0 @@ -variable "validation" { - validation { - condition = var.validation != 4 - error_message = "not four" # ERROR: Invalid validation error message - } -} diff --git a/internal/terraform/configs/testdata/invalid-files/variable-validation-condition-badref.tf b/internal/terraform/configs/testdata/invalid-files/variable-validation-condition-badref.tf index 42f4aa5f..9b9e9357 100644 --- a/internal/terraform/configs/testdata/invalid-files/variable-validation-condition-badref.tf +++ b/internal/terraform/configs/testdata/invalid-files/variable-validation-condition-badref.tf 
@@ -9,3 +9,10 @@ variable "validation" { error_message = "Must be five." } } + +variable "validation_error_expression" { + validation { + condition = var.validation_error_expression != 1 + error_message = "Cannot equal ${local.foo}." # ERROR: Invalid reference in variable validation + } +} diff --git a/internal/terraform/configs/testdata/invalid-modules/nullable-with-default-null/main.tf b/internal/terraform/configs/testdata/invalid-modules/nullable-with-default-null/main.tf new file mode 100644 index 00000000..bf7a58a4 --- /dev/null +++ b/internal/terraform/configs/testdata/invalid-modules/nullable-with-default-null/main.tf @@ -0,0 +1,5 @@ +variable "in" { + type = number + nullable = false + default = null +} diff --git a/internal/terraform/configs/testdata/invalid-modules/override-cloud-duplicates/main.tf b/internal/terraform/configs/testdata/invalid-modules/override-cloud-duplicates/main.tf new file mode 100644 index 00000000..2de9a58d --- /dev/null +++ b/internal/terraform/configs/testdata/invalid-modules/override-cloud-duplicates/main.tf @@ -0,0 +1,14 @@ +terraform { + cloud { + organization = "foo" + should_not_be_present_with_override = true + } +} + +resource "aws_instance" "web" { + ami = "ami-1234" + security_groups = [ + "foo", + "bar", + ] +} diff --git a/internal/terraform/configs/testdata/invalid-modules/override-cloud-duplicates/override.tf b/internal/terraform/configs/testdata/invalid-modules/override-cloud-duplicates/override.tf new file mode 100644 index 00000000..17ef0115 --- /dev/null +++ b/internal/terraform/configs/testdata/invalid-modules/override-cloud-duplicates/override.tf @@ -0,0 +1,11 @@ +terraform { + cloud { + organization = "foo" + } +} + +terraform { + cloud { + organization = "bar" + } +} diff --git a/internal/terraform/configs/testdata/nested-cloud-warning/child/child.tf b/internal/terraform/configs/testdata/nested-cloud-warning/child/child.tf new file mode 100644 index 00000000..540b9217 --- /dev/null +++ 
b/internal/terraform/configs/testdata/nested-cloud-warning/child/child.tf @@ -0,0 +1,6 @@ +terraform { + # Only the root module can declare a Cloud configuration. Terraform should emit a warning + # about this child module Cloud declaration. + cloud { + } +} diff --git a/internal/terraform/configs/testdata/nested-cloud-warning/root.tf b/internal/terraform/configs/testdata/nested-cloud-warning/root.tf new file mode 100644 index 00000000..1f95749f --- /dev/null +++ b/internal/terraform/configs/testdata/nested-cloud-warning/root.tf @@ -0,0 +1,3 @@ +module "child" { + source = "./child" +} diff --git a/internal/terraform/configs/testdata/nested-errors/child_a/child_a.tf b/internal/terraform/configs/testdata/nested-errors/child_a/child_a.tf index 6c575970..d2fb89fa 100644 --- a/internal/terraform/configs/testdata/nested-errors/child_a/child_a.tf +++ b/internal/terraform/configs/testdata/nested-errors/child_a/child_a.tf @@ -1,4 +1,7 @@ module "child_c" { - source = "child_c" + # Note: this test case has an unrealistic module loader that resolves all + # sources as relative to the fixture directory, rather than to the + # current module directory as Terraform normally would. 
+ source = "./child_c" } diff --git a/internal/terraform/configs/testdata/nested-errors/root.tf b/internal/terraform/configs/testdata/nested-errors/root.tf index c9db0204..d596eb06 100644 --- a/internal/terraform/configs/testdata/nested-errors/root.tf +++ b/internal/terraform/configs/testdata/nested-errors/root.tf @@ -1,3 +1,3 @@ module "child_a" { - source = "child_a" + source = "./child_a" } diff --git a/internal/terraform/configs/testdata/valid-files/cloud.tf b/internal/terraform/configs/testdata/valid-files/cloud.tf new file mode 100644 index 00000000..91985fca --- /dev/null +++ b/internal/terraform/configs/testdata/valid-files/cloud.tf @@ -0,0 +1,10 @@ + +terraform { + cloud { + foo = "bar" + + baz { + bar = "foo" + } + } +} diff --git a/internal/terraform/configs/testdata/valid-files/preconditions-postconditions.tf b/internal/terraform/configs/testdata/valid-files/preconditions-postconditions.tf new file mode 100644 index 00000000..6f48840a --- /dev/null +++ b/internal/terraform/configs/testdata/valid-files/preconditions-postconditions.tf @@ -0,0 +1,34 @@ +resource "test" "test" { + lifecycle { + precondition { + condition = path.module != "" + error_message = "Must be true." + } + postcondition { + condition = path.module != "" + error_message = "Must be true." + } + } +} + +data "test" "test" { + lifecycle { + precondition { + condition = path.module != "" + error_message = "Must be true." + } + postcondition { + condition = path.module != "" + error_message = "Must be true." + } + } +} + +output "test" { + value = "" + + precondition { + condition = path.module != "" + error_message = "Must be true." 
+ } +} diff --git a/internal/terraform/configs/testdata/valid-files/resources.tf b/internal/terraform/configs/testdata/valid-files/resources.tf index 53fb7453..aab038ea 100644 --- a/internal/terraform/configs/testdata/valid-files/resources.tf +++ b/internal/terraform/configs/testdata/valid-files/resources.tf @@ -25,6 +25,7 @@ resource "aws_security_group" "firewall" { } resource "aws_instance" "web" { + count = 2 ami = "ami-1234" security_groups = [ "foo", @@ -40,3 +41,9 @@ resource "aws_instance" "web" { aws_security_group.firewall, ] } + +resource "aws_instance" "depends" { + lifecycle { + replace_triggered_by = [ aws_instance.web[1], aws_security_group.firewall.id ] + } +} diff --git a/internal/terraform/configs/testdata/valid-files/resources.tf.json b/internal/terraform/configs/testdata/valid-files/resources.tf.json new file mode 100644 index 00000000..627963d0 --- /dev/null +++ b/internal/terraform/configs/testdata/valid-files/resources.tf.json @@ -0,0 +1,18 @@ +{ + "resource": { + "test_object": { + "a": { + "count": 1, + "test_string": "new" + }, + "b": { + "count": 1, + "lifecycle": { + "replace_triggered_by": [ + "test_object.a[count.index].test_string" + ] + } + } + } + } +} diff --git a/internal/terraform/configs/testdata/valid-files/variable_validation.tf b/internal/terraform/configs/testdata/valid-files/variable_validation.tf index 0a0c2dd5..20e227e0 100644 --- a/internal/terraform/configs/testdata/valid-files/variable_validation.tf +++ b/internal/terraform/configs/testdata/valid-files/variable_validation.tf @@ -4,3 +4,19 @@ variable "validation" { error_message = "Must be five." } } + +variable "validation_function" { + type = list(string) + validation { + condition = length(var.validation_function) > 0 + error_message = "Must not be empty." 
+ } +} + +variable "validation_error_expression" { + type = list(string) + validation { + condition = length(var.validation_error_expression) < 10 + error_message = "Too long (${length(var.validation_error_expression)} is greater than 10)." + } +} diff --git a/internal/terraform/configs/testdata/valid-files/variables.tf b/internal/terraform/configs/testdata/valid-files/variables.tf index d641966b..16640c98 100644 --- a/internal/terraform/configs/testdata/valid-files/variables.tf +++ b/internal/terraform/configs/testdata/valid-files/variables.tf @@ -30,3 +30,15 @@ variable "sensitive_value" { } sensitive = true } + +variable "nullable" { + type = string + nullable = true + default = "ok" +} + +variable "nullable_default_null" { + type = map(string) + nullable = true + default = null +} diff --git a/internal/terraform/configs/testdata/valid-modules/moved-blocks/moved-blocks-1.tf b/internal/terraform/configs/testdata/valid-modules/moved-blocks/moved-blocks-1.tf new file mode 100644 index 00000000..fab882d7 --- /dev/null +++ b/internal/terraform/configs/testdata/valid-modules/moved-blocks/moved-blocks-1.tf @@ -0,0 +1,24 @@ +moved { + from = test.foo + to = test.bar +} + +moved { + from = test.foo + to = test.bar["bloop"] +} + +moved { + from = module.a + to = module.b +} + +moved { + from = module.a + to = module.a["foo"] +} + +moved { + from = test.foo + to = module.a.test.foo +} diff --git a/internal/terraform/configs/testdata/valid-modules/moved-blocks/moved-blocks-2.tf b/internal/terraform/configs/testdata/valid-modules/moved-blocks/moved-blocks-2.tf new file mode 100644 index 00000000..afc9f5a7 --- /dev/null +++ b/internal/terraform/configs/testdata/valid-modules/moved-blocks/moved-blocks-2.tf @@ -0,0 +1,6 @@ +# One more moved block in a separate file just to make sure the +# appending of multiple files works properly. 
+moved { + from = data.test.foo + to = data.test.bar +} diff --git a/internal/terraform/configs/testdata/valid-modules/override-backend-no-base/main.tf b/internal/terraform/configs/testdata/valid-modules/override-backend-no-base/main.tf new file mode 100644 index 00000000..7bb1380e --- /dev/null +++ b/internal/terraform/configs/testdata/valid-modules/override-backend-no-base/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "web" { + ami = "ami-1234" + security_groups = [ + "foo", + "bar", + ] +} diff --git a/internal/terraform/configs/testdata/valid-modules/override-backend-no-base/override.tf b/internal/terraform/configs/testdata/valid-modules/override-backend-no-base/override.tf new file mode 100644 index 00000000..d57fade6 --- /dev/null +++ b/internal/terraform/configs/testdata/valid-modules/override-backend-no-base/override.tf @@ -0,0 +1,5 @@ +terraform { + backend "bar" { + path = "CHANGED/relative/path/to/terraform.tfstate" + } +} diff --git a/internal/terraform/configs/testdata/valid-modules/override-backend-with-cloud/main.tf b/internal/terraform/configs/testdata/valid-modules/override-backend-with-cloud/main.tf new file mode 100644 index 00000000..56fb72f3 --- /dev/null +++ b/internal/terraform/configs/testdata/valid-modules/override-backend-with-cloud/main.tf @@ -0,0 +1,13 @@ +terraform { + backend "foo" { + path = "relative/path/to/terraform.tfstate" + } +} + +resource "aws_instance" "web" { + ami = "ami-1234" + security_groups = [ + "foo", + "bar", + ] +} diff --git a/internal/terraform/configs/testdata/valid-modules/override-backend-with-cloud/override.tf b/internal/terraform/configs/testdata/valid-modules/override-backend-with-cloud/override.tf new file mode 100644 index 00000000..51ae925f --- /dev/null +++ b/internal/terraform/configs/testdata/valid-modules/override-backend-with-cloud/override.tf @@ -0,0 +1,5 @@ +terraform { + cloud { + organization = "foo" + } +} diff --git a/internal/terraform/configs/testdata/valid-modules/override-backend/main.tf 
b/internal/terraform/configs/testdata/valid-modules/override-backend/main.tf new file mode 100644 index 00000000..56fb72f3 --- /dev/null +++ b/internal/terraform/configs/testdata/valid-modules/override-backend/main.tf @@ -0,0 +1,13 @@ +terraform { + backend "foo" { + path = "relative/path/to/terraform.tfstate" + } +} + +resource "aws_instance" "web" { + ami = "ami-1234" + security_groups = [ + "foo", + "bar", + ] +} diff --git a/internal/terraform/configs/testdata/valid-modules/override-backend/override.tf b/internal/terraform/configs/testdata/valid-modules/override-backend/override.tf new file mode 100644 index 00000000..d57fade6 --- /dev/null +++ b/internal/terraform/configs/testdata/valid-modules/override-backend/override.tf @@ -0,0 +1,5 @@ +terraform { + backend "bar" { + path = "CHANGED/relative/path/to/terraform.tfstate" + } +} diff --git a/internal/terraform/configs/testdata/valid-modules/override-cloud-no-base/main.tf b/internal/terraform/configs/testdata/valid-modules/override-cloud-no-base/main.tf new file mode 100644 index 00000000..7bb1380e --- /dev/null +++ b/internal/terraform/configs/testdata/valid-modules/override-cloud-no-base/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "web" { + ami = "ami-1234" + security_groups = [ + "foo", + "bar", + ] +} diff --git a/internal/terraform/configs/testdata/valid-modules/override-cloud-no-base/override.tf b/internal/terraform/configs/testdata/valid-modules/override-cloud-no-base/override.tf new file mode 100644 index 00000000..51ae925f --- /dev/null +++ b/internal/terraform/configs/testdata/valid-modules/override-cloud-no-base/override.tf @@ -0,0 +1,5 @@ +terraform { + cloud { + organization = "foo" + } +} diff --git a/internal/terraform/configs/testdata/valid-modules/override-cloud/main.tf b/internal/terraform/configs/testdata/valid-modules/override-cloud/main.tf new file mode 100644 index 00000000..2de9a58d --- /dev/null +++ b/internal/terraform/configs/testdata/valid-modules/override-cloud/main.tf @@ -0,0 
+1,14 @@ +terraform { + cloud { + organization = "foo" + should_not_be_present_with_override = true + } +} + +resource "aws_instance" "web" { + ami = "ami-1234" + security_groups = [ + "foo", + "bar", + ] +} diff --git a/internal/terraform/configs/testdata/valid-modules/override-cloud/override.tf b/internal/terraform/configs/testdata/valid-modules/override-cloud/override.tf new file mode 100644 index 00000000..a4a7752c --- /dev/null +++ b/internal/terraform/configs/testdata/valid-modules/override-cloud/override.tf @@ -0,0 +1,5 @@ +terraform { + cloud { + organization = "CHANGED" + } +} diff --git a/internal/terraform/configs/testdata/valid-modules/override-ignore-changes/main.tf b/internal/terraform/configs/testdata/valid-modules/override-ignore-changes/main.tf new file mode 100644 index 00000000..55ae5151 --- /dev/null +++ b/internal/terraform/configs/testdata/valid-modules/override-ignore-changes/main.tf @@ -0,0 +1,3 @@ +resource "test_instance" "foo" { + foo = "bar" +} diff --git a/internal/terraform/configs/testdata/valid-modules/override-ignore-changes/main_override.tf b/internal/terraform/configs/testdata/valid-modules/override-ignore-changes/main_override.tf new file mode 100644 index 00000000..f9cd9a5d --- /dev/null +++ b/internal/terraform/configs/testdata/valid-modules/override-ignore-changes/main_override.tf @@ -0,0 +1,6 @@ +resource "test_instance" "foo" { + foo = "bar" + lifecycle { + ignore_changes = all + } +} diff --git a/internal/terraform/configs/testdata/valid-modules/override-variable/b_override.tf b/internal/terraform/configs/testdata/valid-modules/override-variable/b_override.tf index 21dbe82e..f09ce538 100644 --- a/internal/terraform/configs/testdata/valid-modules/override-variable/b_override.tf +++ b/internal/terraform/configs/testdata/valid-modules/override-variable/b_override.tf @@ -1,4 +1,5 @@ variable "fully_overridden" { + nullable = false default = "b_override" description = "b_override description" type = string diff --git 
a/internal/terraform/copy/copy_dir.go b/internal/terraform/copy/copy_dir.go new file mode 100644 index 00000000..f76470fd --- /dev/null +++ b/internal/terraform/copy/copy_dir.go @@ -0,0 +1,146 @@ +package copy + +import ( + "io" + "os" + "path/filepath" + "strings" +) + +// CopyDir recursively copies all of the files within the directory given in +// src to the directory given in dst. +// +// Both directories should already exist. If the destination directory is +// non-empty then the new files will merge in with the old, overwriting any +// files that have a relative path in common between source and destination. +// +// Recursive copying of directories is inevitably a rather opinionated sort of +// operation, so this function won't be appropriate for all use-cases. Some +// of the "opinions" it has are described in the following paragraphs: +// +// Symlinks in the source directory are recreated with the same target in the +// destination directory. If the symlink is to a directory itself, that +// directory is not recursively visited for further copying. +// +// File and directory modes are not preserved exactly, but the executable +// flag is preserved for files on operating systems where it is significant. +// +// Any "dot files" it encounters along the way are skipped, even on platforms +// that do not normally ascribe special meaning to files with names starting +// with dots. +// +// Callers may rely on the above details and other undocumented details of +// this function, so if you intend to change it be sure to review the callers +// first and make sure they are compatible with the change you intend to make. 
+func CopyDir(dst, src string) error { + src, err := filepath.EvalSymlinks(src) + if err != nil { + return err + } + + walkFn := func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if path == src { + return nil + } + + if strings.HasPrefix(filepath.Base(path), ".") { + // Skip any dot files + if info.IsDir() { + return filepath.SkipDir + } else { + return nil + } + } + + // The "path" has the src prefixed to it. We need to join our + // destination with the path without the src on it. + dstPath := filepath.Join(dst, path[len(src):]) + + // we don't want to try and copy the same file over itself. + if eq, err := SameFile(path, dstPath); eq { + return nil + } else if err != nil { + return err + } + + // If we have a directory, make that subdirectory, then continue + // the walk. + if info.IsDir() { + if path == filepath.Join(src, dst) { + // dst is in src; don't walk it. + return nil + } + + if err := os.MkdirAll(dstPath, 0755); err != nil { + return err + } + + return nil + } + + // If the current path is a symlink, recreate the symlink relative to + // the dst directory + if info.Mode()&os.ModeSymlink == os.ModeSymlink { + target, err := os.Readlink(path) + if err != nil { + return err + } + + return os.Symlink(target, dstPath) + } + + // If we have a file, copy the contents. + srcF, err := os.Open(path) + if err != nil { + return err + } + defer srcF.Close() + + dstF, err := os.Create(dstPath) + if err != nil { + return err + } + defer dstF.Close() + + if _, err := io.Copy(dstF, srcF); err != nil { + return err + } + + // Chmod it + return os.Chmod(dstPath, info.Mode()) + } + + return filepath.Walk(src, walkFn) +} + +// SameFile returns true if the two given paths refer to the same physical +// file on disk, using the unique file identifiers from the underlying +// operating system. For example, on Unix systems this checks whether the +// two files are on the same device and have the same inode. 
+func SameFile(a, b string) (bool, error) { + if a == b { + return true, nil + } + + aInfo, err := os.Lstat(a) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + + bInfo, err := os.Lstat(b) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + + return os.SameFile(aInfo, bInfo), nil +} diff --git a/internal/terraform/copy/copy_dir_test.go b/internal/terraform/copy/copy_dir_test.go new file mode 100644 index 00000000..244bcbb4 --- /dev/null +++ b/internal/terraform/copy/copy_dir_test.go @@ -0,0 +1,99 @@ +package copy + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +// TestCopyDir_symlinks sets up a directory with two submodules, +// one being a symlink to the other +// +// The resultant file structure is as follows: +// ├── modules +// │   ├── symlink-module -> test-module +// │   └── test-module +// │   └── main.tf +// └── target +// ├── symlink-module -> test-module +// └── test-module +// └── main.tf + +func TestCopyDir_symlinks(t *testing.T) { + tmpdir := t.TempDir() + + moduleDir := filepath.Join(tmpdir, "modules") + err := os.Mkdir(moduleDir, os.ModePerm) + if err != nil { + t.Fatal(err) + } + + subModuleDir := filepath.Join(moduleDir, "test-module") + err = os.Mkdir(subModuleDir, os.ModePerm) + if err != nil { + t.Fatal(err) + } + + err = ioutil.WriteFile(filepath.Join(subModuleDir, "main.tf"), []byte("hello"), 0644) + if err != nil { + t.Fatal(err) + } + + err = os.Symlink("test-module", filepath.Join(moduleDir, "symlink-module")) + if err != nil { + t.Fatal(err) + } + + targetDir := filepath.Join(tmpdir, "target") + os.Mkdir(targetDir, os.ModePerm) + + err = CopyDir(targetDir, moduleDir) + if err != nil { + t.Fatal(err) + } + + if _, err = os.Lstat(filepath.Join(targetDir, "test-module", "main.tf")); os.IsNotExist(err) { + t.Fatal("target test-module/main.tf was not created") + } + + if _, err = os.Lstat(filepath.Join(targetDir, "symlink-module", 
"main.tf")); os.IsNotExist(err) { + t.Fatal("target symlink-module/main.tf was not created") + } +} + +func TestCopyDir_symlink_file(t *testing.T) { + tmpdir := t.TempDir() + + moduleDir := filepath.Join(tmpdir, "modules") + err := os.Mkdir(moduleDir, os.ModePerm) + if err != nil { + t.Fatal(err) + } + + err = ioutil.WriteFile(filepath.Join(moduleDir, "main.tf"), []byte("hello"), 0644) + if err != nil { + t.Fatal(err) + } + + err = os.Symlink("main.tf", filepath.Join(moduleDir, "symlink.tf")) + if err != nil { + t.Fatal(err) + } + + targetDir := filepath.Join(tmpdir, "target") + os.Mkdir(targetDir, os.ModePerm) + + err = CopyDir(targetDir, moduleDir) + if err != nil { + t.Fatal(err) + } + + if _, err = os.Lstat(filepath.Join(targetDir, "main.tf")); os.IsNotExist(err) { + t.Fatal("target/main.tf was not created") + } + + if _, err = os.Lstat(filepath.Join(targetDir, "symlink.tf")); os.IsNotExist(err) { + t.Fatal("target/symlink.tf was not created") + } +} diff --git a/internal/terraform/copy/copy_file.go b/internal/terraform/copy/copy_file.go new file mode 100644 index 00000000..d4833bc0 --- /dev/null +++ b/internal/terraform/copy/copy_file.go @@ -0,0 +1,52 @@ +package copy + +import ( + "io" + "os" +) + +// From: https://gist.github.com/m4ng0squ4sh/92462b38df26839a3ca324697c8cba04 + +// CopyFile copies the contents of the file named src to the file named +// by dst. The file will be created if it does not already exist. If the +// destination file exists, all it's contents will be replaced by the contents +// of the source file. The file mode will be copied from the source and +// the copied data is synced/flushed to stable storage. 
+func CopyFile(src, dst string) (err error) { + in, err := os.Open(src) + if err != nil { + return + } + defer in.Close() + + out, err := os.Create(dst) + if err != nil { + return + } + defer func() { + if e := out.Close(); e != nil { + err = e + } + }() + + _, err = io.Copy(out, in) + if err != nil { + return + } + + err = out.Sync() + if err != nil { + return + } + + si, err := os.Stat(src) + if err != nil { + return + } + err = os.Chmod(dst, si.Mode()) + if err != nil { + return + } + + return +} diff --git a/internal/terraform/depsfile/doc.go b/internal/terraform/depsfile/doc.go new file mode 100644 index 00000000..a0f25b91 --- /dev/null +++ b/internal/terraform/depsfile/doc.go @@ -0,0 +1,22 @@ +// Package depsfile contains the logic for reading and writing Terraform's +// dependency lock and development override configuration files. +// +// These files are separate from the main Terraform configuration files (.tf) +// for a number of reasons. The first is to help establish a distinction +// where .tf files configure a particular module while these configure +// a whole configuration tree. Another, more practical consideration is that +// we intend both of these files to be primarily maintained automatically by +// Terraform itself, rather than by human-originated edits, and so keeping +// them separate means that it's easier to distinguish the files that Terraform +// will change automatically during normal workflow from the files that +// Terraform only edits on direct request. +// +// Both files use HCL syntax, for consistency with other files in Terraform +// that we expect humans to (in this case, only occasionally) edit directly. +// A dependency lock file tracks the most recently selected upstream versions +// of each dependency, and is intended for checkin to version control. 
+// A development override file allows for temporarily overriding upstream +// dependencies with local files/directories on disk as an aid to testing +// a cross-codebase change during development, and should not be saved in +// version control. +package depsfile diff --git a/internal/terraform/depsfile/locks.go b/internal/terraform/depsfile/locks.go new file mode 100644 index 00000000..08474394 --- /dev/null +++ b/internal/terraform/depsfile/locks.go @@ -0,0 +1,415 @@ +package depsfile + +import ( + "fmt" + "sort" + + "github.com/camptocamp/terraboard/internal/terraform/addrs" + "github.com/camptocamp/terraboard/internal/terraform/getproviders" +) + +// Locks is the top-level type representing the information retained in a +// dependency lock file. +// +// Locks and the other types used within it are mutable via various setter +// methods, but they are not safe for concurrent modifications, so it's the +// caller's responsibility to prevent concurrent writes and writes concurrent +// with reads. +type Locks struct { + providers map[addrs.Provider]*ProviderLock + + // overriddenProviders is a subset of providers which we might be tracking + // in field providers but whose lock information we're disregarding for + // this particular run due to some feature that forces Terraform to not + // use a normally-installed plugin for it. For example, the "provider dev + // overrides" feature means that we'll be using an arbitrary directory on + // disk as the package, regardless of what might be selected in "providers". + // + // overriddenProviders is an in-memory-only annotation, never stored as + // part of a lock file and thus not persistent between Terraform runs. + // The CLI layer is generally the one responsible for populating this, + // by calling SetProviderOverridden in response to CLI Configuration + // settings, environment variables, or whatever similar sources. 
+ overriddenProviders map[addrs.Provider]struct{} + + // TODO: In future we'll also have module locks, but the design of that + // still needs some more work and we're deferring that to get the + // provider locking capability out sooner, because it's more common to + // directly depend on providers maintained outside your organization than + // modules maintained outside your organization. + + // sources is a copy of the map of source buffers produced by the HCL + // parser during loading, which we retain only so that the caller can + // use it to produce source code snippets in error messages. + sources map[string][]byte +} + +// NewLocks constructs and returns a new Locks object that initially contains +// no locks at all. +func NewLocks() *Locks { + return &Locks{ + providers: make(map[addrs.Provider]*ProviderLock), + + // no "sources" here, because that's only for locks objects loaded + // from files. + } +} + +// Provider returns the stored lock for the given provider, or nil if that +// provider currently has no lock. +func (l *Locks) Provider(addr addrs.Provider) *ProviderLock { + return l.providers[addr] +} + +// AllProviders returns a map describing all of the provider locks in the +// receiver. +func (l *Locks) AllProviders() map[addrs.Provider]*ProviderLock { + // We return a copy of our internal map so that future calls to + // SetProvider won't modify the map we're returning, or vice-versa. + ret := make(map[addrs.Provider]*ProviderLock, len(l.providers)) + for k, v := range l.providers { + ret[k] = v + } + return ret +} + +// SetProvider creates a new lock or replaces the existing lock for the given +// provider. +// +// SetProvider returns the newly-created provider lock object, which +// invalidates any ProviderLock object previously returned from Provider or +// SetProvider for the given provider address. 
+// +// The ownership of the backing array for the slice of hashes passes to this +// function, and so the caller must not read or write that backing array after +// calling SetProvider. +// +// Only lockable providers can be passed to this method. If you pass a +// non-lockable provider address then this function will panic. Use +// function ProviderIsLockable to determine whether a particular provider +// should participate in the version locking mechanism. +func (l *Locks) SetProvider(addr addrs.Provider, version getproviders.Version, constraints getproviders.VersionConstraints, hashes []getproviders.Hash) *ProviderLock { + if !ProviderIsLockable(addr) { + panic(fmt.Sprintf("Locks.SetProvider with non-lockable provider %s", addr)) + } + + new := NewProviderLock(addr, version, constraints, hashes) + l.providers[new.addr] = new + return new +} + +// RemoveProvider removes any existing lock file entry for the given provider. +// +// If the given provider did not already have a lock entry, RemoveProvider is +// a no-op. +// +// Only lockable providers can be passed to this method. If you pass a +// non-lockable provider address then this function will panic. Use +// function ProviderIsLockable to determine whether a particular provider +// should participate in the version locking mechanism. +func (l *Locks) RemoveProvider(addr addrs.Provider) { + if !ProviderIsLockable(addr) { + panic(fmt.Sprintf("Locks.RemoveProvider with non-lockable provider %s", addr)) + } + + delete(l.providers, addr) +} + +// SetProviderOverridden records that this particular Terraform process will +// not pay attention to the recorded lock entry for the given provider, and +// will instead access that provider's functionality in some other special +// way that isn't sensitive to provider version selections or checksums. +// +// This is an in-memory-only annotation which lives only inside a particular +// Locks object, and is never persisted as part of a saved lock file on disk. 
+// It's valid to still use other methods of the reciever to access +// already-stored lock information and to update lock information for an +// overridden provider, but some callers may need to use ProviderIsOverridden +// to selectively disregard stored lock information for overridden providers, +// depending on what they intended to use the lock information for. +func (l *Locks) SetProviderOverridden(addr addrs.Provider) { + if l.overriddenProviders == nil { + l.overriddenProviders = make(map[addrs.Provider]struct{}) + } + l.overriddenProviders[addr] = struct{}{} +} + +// ProviderIsOverridden returns true only if the given provider address was +// previously registered as overridden by calling SetProviderOverridden. +func (l *Locks) ProviderIsOverridden(addr addrs.Provider) bool { + _, ret := l.overriddenProviders[addr] + return ret +} + +// SetSameOverriddenProviders updates the receiver to mark as overridden all +// of the same providers already marked as overridden in the other given locks. +// +// This allows propagating override information between different lock objects, +// as if calling SetProviderOverridden for each address already overridden +// in the other given locks. If the reciever already has overridden providers, +// SetSameOverriddenProviders will preserve them. +func (l *Locks) SetSameOverriddenProviders(other *Locks) { + if other == nil { + return + } + for addr := range other.overriddenProviders { + l.SetProviderOverridden(addr) + } +} + +// NewProviderLock creates a new ProviderLock object that isn't associated +// with any Locks object. +// +// This is here primarily for testing. Most callers should use Locks.SetProvider +// to construct a new provider lock and insert it into a Locks object at the +// same time. +// +// The ownership of the backing array for the slice of hashes passes to this +// function, and so the caller must not read or write that backing array after +// calling NewProviderLock. 
+// +// Only lockable providers can be passed to this method. If you pass a +// non-lockable provider address then this function will panic. Use +// function ProviderIsLockable to determine whether a particular provider +// should participate in the version locking mechanism. +func NewProviderLock(addr addrs.Provider, version getproviders.Version, constraints getproviders.VersionConstraints, hashes []getproviders.Hash) *ProviderLock { + if !ProviderIsLockable(addr) { + panic(fmt.Sprintf("Locks.NewProviderLock with non-lockable provider %s", addr)) + } + + // Normalize the hashes into lexical order so that we can do straightforward + // equality tests between different locks for the same provider. The + // hashes are logically a set, so the given order is insignificant. + sort.Slice(hashes, func(i, j int) bool { + return string(hashes[i]) < string(hashes[j]) + }) + + // This is a slightly-tricky in-place deduping to avoid unnecessarily + // allocating a new array in the common case where there are no duplicates: + // we iterate over "hashes" at the same time as appending to another slice + // with the same backing array, relying on the fact that deduping can only + // _skip_ elements from the input, and will never generate additional ones + // that would cause the writer to get ahead of the reader. This also + // assumes that we already sorted the items, which means that any duplicates + // will be consecutive in the sequence. + dedupeHashes := hashes[:0] + prevHash := getproviders.NilHash + for _, hash := range hashes { + if hash != prevHash { + dedupeHashes = append(dedupeHashes, hash) + prevHash = hash + } + } + + return &ProviderLock{ + addr: addr, + version: version, + versionConstraints: constraints, + hashes: dedupeHashes, + } +} + +// ProviderIsLockable returns true if the given provider is eligible for +// version locking. +// +// Currently, all providers except builtin and legacy providers are eligible +// for locking. 
+func ProviderIsLockable(addr addrs.Provider) bool { + return !(addr.IsBuiltIn() || addr.IsLegacy()) +} + +// Sources returns the source code of the file the receiver was generated from, +// or an empty map if the receiver wasn't generated from a file. +// +// This return type matches the one expected by HCL diagnostics printers to +// produce source code snapshots, which is the only intended use for this +// method. +func (l *Locks) Sources() map[string][]byte { + return l.sources +} + +// Equal returns true if the given Locks represents the same information as +// the receiver. +// +// Equal explicitly _does not_ consider the equality of version constraints +// in the saved locks, because those are saved only as hints to help the UI +// explain what's changed between runs, and are never used as part of +// dependency installation decisions. +func (l *Locks) Equal(other *Locks) bool { + if len(l.providers) != len(other.providers) { + return false + } + for addr, thisLock := range l.providers { + otherLock, ok := other.providers[addr] + if !ok { + return false + } + + if thisLock.addr != otherLock.addr { + // It'd be weird to get here because we already looked these up + // by address above. + return false + } + if thisLock.version != otherLock.version { + // Equality rather than "Version.Same" because changes to the + // build metadata are significant for the purpose of this function: + // it's a different package even if it has the same precedence. + return false + } + + // Although "hashes" is declared as a slice, it's logically an + // unordered set. However, we normalize the slice of hashes when + // recieving it in NewProviderLock, so we can just do a simple + // item-by-item equality test here. 
+ if len(thisLock.hashes) != len(otherLock.hashes) { + return false + } + for i := range thisLock.hashes { + if thisLock.hashes[i] != otherLock.hashes[i] { + return false + } + } + } + // We don't need to worry about providers that are in "other" but not + // in the receiver, because we tested the lengths being equal above. + + return true +} + +// EqualProviderAddress returns true if the given Locks have the same provider +// address as the receiver. This doesn't check version and hashes. +func (l *Locks) EqualProviderAddress(other *Locks) bool { + if len(l.providers) != len(other.providers) { + return false + } + + for addr := range l.providers { + _, ok := other.providers[addr] + if !ok { + return false + } + } + + return true +} + +// Empty returns true if the given Locks object contains no actual locks. +// +// UI code might wish to use this to distinguish a lock file being +// written for the first time from subsequent updates to that lock file. +func (l *Locks) Empty() bool { + return len(l.providers) == 0 +} + +// DeepCopy creates a new Locks that represents the same information as the +// receiver but does not share memory for any parts of the structure that. +// are mutable through methods on Locks. +// +// Note that this does _not_ create deep copies of parts of the structure +// that are technically mutable but are immutable by convention, such as the +// array underlying the slice of version constraints. Callers may mutate the +// resulting data structure only via the direct methods of Locks. +func (l *Locks) DeepCopy() *Locks { + ret := NewLocks() + for addr, lock := range l.providers { + var hashes []getproviders.Hash + if len(lock.hashes) > 0 { + hashes = make([]getproviders.Hash, len(lock.hashes)) + copy(hashes, lock.hashes) + } + ret.SetProvider(addr, lock.version, lock.versionConstraints, hashes) + } + return ret +} + +// ProviderLock represents lock information for a specific provider. 
+type ProviderLock struct { + // addr is the address of the provider this lock applies to. + addr addrs.Provider + + // version is the specific version that was previously selected, while + // versionConstraints is the constraint that was used to make that + // selection, which we can potentially use to hint to run + // e.g. terraform init -upgrade if a user has changed a version + // constraint but the previous selection still remains valid. + // "version" is therefore authoritative, while "versionConstraints" is + // just for a UI hint and not used to make any real decisions. + version getproviders.Version + versionConstraints getproviders.VersionConstraints + + // hashes contains zero or more hashes of packages or package contents + // for the package associated with the selected version across all of + // the supported platforms. + // + // hashes can contain a mixture of hashes in different formats to support + // changes over time. The new-style hash format is to have a string + // starting with "h" followed by a version number and then a colon, like + // "h1:" for the first hash format version. Other hash versions following + // this scheme may come later. These versioned hash schemes are implemented + // in the getproviders package; for example, "h1:" is implemented in + // getproviders.HashV1 . + // + // There is also a legacy hash format which is just a lowercase-hex-encoded + // SHA256 hash of the official upstream .zip file for the selected version. + // We'll allow as that a stop-gap until we can upgrade Terraform Registry + // to support the new scheme, but is non-ideal because we can verify it only + // when we have the original .zip file exactly; we can't verify a local + // directory containing the unpacked contents of that .zip file. + // + // We ideally want to populate hashes for all available platforms at + // once, by referring to the signed checksums file in the upstream + // registry. 
In that ideal case it's possible to later work with the same + // configuration on a different platform while still verifying the hashes. + // However, installation from any method other than an origin registry + // means we can only populate the hash for the current platform, and so + // it won't be possible to verify a subsequent installation of the same + // provider on a different platform. + hashes []getproviders.Hash +} + +// Provider returns the address of the provider this lock applies to. +func (l *ProviderLock) Provider() addrs.Provider { + return l.addr +} + +// Version returns the currently-selected version for the corresponding provider. +func (l *ProviderLock) Version() getproviders.Version { + return l.version +} + +// VersionConstraints returns the version constraints that were recorded as +// being used to choose the version returned by Version. +// +// These version constraints are not authoritative for future selections and +// are included only so Terraform can detect if the constraints in +// configuration have changed since a selection was made, and thus hint to the +// user that they may need to run terraform init -upgrade to apply the new +// constraints. +func (l *ProviderLock) VersionConstraints() getproviders.VersionConstraints { + return l.versionConstraints +} + +// AllHashes returns all of the package hashes that were recorded when this +// lock was created. If no hashes were recorded for that platform, the result +// is a zero-length slice. +// +// If your intent is to verify a package against the recorded hashes, use +// PreferredHashes to get only the hashes which the current version +// of Terraform considers the strongest of the available hashing schemes, one +// of which must match in order for verification to be considered successful. +// +// Do not modify the backing array of the returned slice. 
+func (l *ProviderLock) AllHashes() []getproviders.Hash { + return l.hashes +} + +// PreferredHashes returns a filtered version of the AllHashes return value +// which includes only the strongest of the availabile hash schemes, in +// case legacy hash schemes are deprecated over time but still supported for +// upgrade purposes. +// +// At least one of the given hashes must match for a package to be considered +// valud. +func (l *ProviderLock) PreferredHashes() []getproviders.Hash { + return getproviders.PreferredHashes(l.hashes) +} diff --git a/internal/terraform/depsfile/locks_file.go b/internal/terraform/depsfile/locks_file.go new file mode 100644 index 00000000..0020327d --- /dev/null +++ b/internal/terraform/depsfile/locks_file.go @@ -0,0 +1,494 @@ +package depsfile + +import ( + "fmt" + "sort" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/hashicorp/hcl/v2/hclparse" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/hashicorp/hcl/v2/hclwrite" + "github.com/zclconf/go-cty/cty" + + "github.com/camptocamp/terraboard/internal/terraform/addrs" + "github.com/camptocamp/terraboard/internal/terraform/getproviders" + "github.com/camptocamp/terraboard/internal/terraform/replacefile" + "github.com/camptocamp/terraboard/internal/terraform/tfdiags" + "github.com/hashicorp/terraform/version" +) + +// LoadLocksFromFile reads locks from the given file, expecting it to be a +// valid dependency lock file, or returns error diagnostics explaining why +// that was not possible. +// +// The returned locks are a snapshot of what was present on disk at the time +// the method was called. It does not take into account any subsequent writes +// to the file, whether through this package's functions or by external +// writers. +// +// If the returned diagnostics contains errors then the returned Locks may +// be incomplete or invalid. 
+func LoadLocksFromFile(filename string) (*Locks, tfdiags.Diagnostics) { + return loadLocks(func(parser *hclparse.Parser) (*hcl.File, hcl.Diagnostics) { + return parser.ParseHCLFile(filename) + }) +} + +// LoadLocksFromBytes reads locks from the given byte array, pretending that +// it was read from the given filename. +// +// The constraints and behaviors are otherwise the same as for +// LoadLocksFromFile. LoadLocksFromBytes is primarily to allow more convenient +// integration testing (avoiding creating temporary files on disk); if you +// are writing non-test code, consider whether LoadLocksFromFile might be +// more appropriate to call. +// +// It is valid to use this with dependency lock information recorded as part of +// a plan file, in which case the given filename will typically be a +// placeholder that will only be seen in the unusual case that the plan file +// contains an invalid lock file, which should only be possible if the user +// edited it directly (Terraform bugs notwithstanding). +func LoadLocksFromBytes(src []byte, filename string) (*Locks, tfdiags.Diagnostics) { + return loadLocks(func(parser *hclparse.Parser) (*hcl.File, hcl.Diagnostics) { + return parser.ParseHCL(src, filename) + }) +} + +func loadLocks(loadParse func(*hclparse.Parser) (*hcl.File, hcl.Diagnostics)) (*Locks, tfdiags.Diagnostics) { + ret := NewLocks() + + var diags tfdiags.Diagnostics + + parser := hclparse.NewParser() + f, hclDiags := loadParse(parser) + ret.sources = parser.Sources() + diags = diags.Append(hclDiags) + if f == nil { + // If we encountered an error loading the file then those errors + // should already be in diags from the above, but the file might + // also be nil itself and so we can't decode from it. 
+ return ret, diags + } + + moreDiags := decodeLocksFromHCL(ret, f.Body) + diags = diags.Append(moreDiags) + return ret, diags +} + +// SaveLocksToFile writes the given locks object to the given file, +// entirely replacing any content already in that file, or returns error +// diagnostics explaining why that was not possible. +// +// SaveLocksToFile attempts an atomic replacement of the file, as an aid +// to external tools such as text editor integrations that might be monitoring +// the file as a signal to invalidate cached metadata. Consequently, other +// temporary files may be temporarily created in the same directory as the +// given filename during the operation. +func SaveLocksToFile(locks *Locks, filename string) tfdiags.Diagnostics { + src, diags := SaveLocksToBytes(locks) + if diags.HasErrors() { + return diags + } + + err := replacefile.AtomicWriteFile(filename, src, 0644) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to update dependency lock file", + fmt.Sprintf("Error while writing new dependency lock information to %s: %s.", filename, err), + )) + return diags + } + + return diags +} + +// SaveLocksToBytes writes the given locks object into a byte array, +// using the same syntax that LoadLocksFromBytes expects to parse. +func SaveLocksToBytes(locks *Locks) ([]byte, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // In other uses of the "hclwrite" package we typically try to make + // surgical updates to the author's existing files, preserving their + // block ordering, comments, etc. We intentionally don't do that here + // to reinforce the fact that this file primarily belongs to Terraform, + // and to help ensure that VCS diffs of the file primarily reflect + // changes that actually affect functionality rather than just cosmetic + // changes, by maintaining it in a highly-normalized form. 
+ + f := hclwrite.NewEmptyFile() + rootBody := f.Body() + + // End-users _may_ edit the lock file in exceptional situations, like + // working around potential dependency selection bugs, but we intend it + // to be primarily maintained automatically by the "terraform init" + // command. + rootBody.AppendUnstructuredTokens(hclwrite.Tokens{ + { + Type: hclsyntax.TokenComment, + Bytes: []byte("# This file is maintained automatically by \"terraform init\".\n"), + }, + { + Type: hclsyntax.TokenComment, + Bytes: []byte("# Manual edits may be lost in future updates.\n"), + }, + }) + + providers := make([]addrs.Provider, 0, len(locks.providers)) + for provider := range locks.providers { + providers = append(providers, provider) + } + sort.Slice(providers, func(i, j int) bool { + return providers[i].LessThan(providers[j]) + }) + + for _, provider := range providers { + lock := locks.providers[provider] + rootBody.AppendNewline() + block := rootBody.AppendNewBlock("provider", []string{lock.addr.String()}) + body := block.Body() + body.SetAttributeValue("version", cty.StringVal(lock.version.String())) + if constraintsStr := getproviders.VersionConstraintsString(lock.versionConstraints); constraintsStr != "" { + body.SetAttributeValue("constraints", cty.StringVal(constraintsStr)) + } + if len(lock.hashes) != 0 { + hashToks := encodeHashSetTokens(lock.hashes) + body.SetAttributeRaw("hashes", hashToks) + } + } + + return f.Bytes(), diags +} + +func decodeLocksFromHCL(locks *Locks, body hcl.Body) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + content, hclDiags := body.Content(&hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "provider", + LabelNames: []string{"source_addr"}, + }, + + // "module" is just a placeholder for future enhancement, so we + // can mostly-ignore the this block type we intend to add in + // future, but warn in case someone tries to use one e.g. if they + // downgraded to an earlier version of Terraform. 
+ { + Type: "module", + LabelNames: []string{"path"}, + }, + }, + }) + diags = diags.Append(hclDiags) + + seenProviders := make(map[addrs.Provider]hcl.Range) + seenModule := false + for _, block := range content.Blocks { + + switch block.Type { + case "provider": + lock, moreDiags := decodeProviderLockFromHCL(block) + diags = diags.Append(moreDiags) + if lock == nil { + continue + } + if previousRng, exists := seenProviders[lock.addr]; exists { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate provider lock", + Detail: fmt.Sprintf("This lockfile already declared a lock for provider %s at %s.", lock.addr.String(), previousRng.String()), + Subject: block.TypeRange.Ptr(), + }) + continue + } + locks.providers[lock.addr] = lock + seenProviders[lock.addr] = block.DefRange + + case "module": + // We'll just take the first module block to use for a single warning, + // because that's sufficient to get the point across without swamping + // the output with warning noise. + if !seenModule { + currentVersion := version.SemVer.String() + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Dependency locks for modules are not yet supported", + Detail: fmt.Sprintf("Terraform v%s only supports dependency locks for providers, not for modules. This configuration may be intended for a later version of Terraform that also supports dependency locks for modules.", currentVersion), + Subject: block.TypeRange.Ptr(), + }) + seenModule = true + } + + default: + // Shouldn't get here because this should be exhaustive for + // all of the block types in the schema above. 
+ } + + } + + return diags +} + +func decodeProviderLockFromHCL(block *hcl.Block) (*ProviderLock, tfdiags.Diagnostics) { + ret := &ProviderLock{} + var diags tfdiags.Diagnostics + + rawAddr := block.Labels[0] + addr, moreDiags := addrs.ParseProviderSourceString(rawAddr) + if moreDiags.HasErrors() { + // The diagnostics from ParseProviderSourceString are, as the name + // suggests, written with an intended audience of someone who is + // writing a "source" attribute in a provider requirement, not + // our lock file. Therefore we're using a less helpful, fixed error + // here, which is non-ideal but hopefully okay for now because we + // don't intend end-users to typically be hand-editing these anyway. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider source address", + Detail: "The provider source address for a provider lock must be a valid, fully-qualified address of the form \"hostname/namespace/type\".", + Subject: block.LabelRanges[0].Ptr(), + }) + return nil, diags + } + if !ProviderIsLockable(addr) { + if addr.IsBuiltIn() { + // A specialized error for built-in providers, because we have an + // explicit explanation for why those are not allowed. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider source address", + Detail: fmt.Sprintf("Cannot lock a version for built-in provider %s. Built-in providers are bundled inside Terraform itself, so you can't select a version for them independently of the Terraform release you are currently running.", addr), + Subject: block.LabelRanges[0].Ptr(), + }) + return nil, diags + } + // Otherwise, we'll use a generic error message. 
+ diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider source address", + Detail: fmt.Sprintf("Provider source address %s is a special provider that is not eligible for dependency locking.", addr), + Subject: block.LabelRanges[0].Ptr(), + }) + return nil, diags + } + if canonAddr := addr.String(); canonAddr != rawAddr { + // We also require the provider addresses in the lock file to be + // written in fully-qualified canonical form, so that it's totally + // clear to a reader which provider each block relates to. Again, + // we expect hand-editing of these to be atypical so it's reasonable + // to be stricter in parsing these than we would be in the main + // configuration. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Non-normalized provider source address", + Detail: fmt.Sprintf("The provider source address for this provider lock must be written as %q, the fully-qualified and normalized form.", canonAddr), + Subject: block.LabelRanges[0].Ptr(), + }) + return nil, diags + } + + ret.addr = addr + + content, hclDiags := block.Body.Content(&hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + {Name: "version", Required: true}, + {Name: "constraints"}, + {Name: "hashes"}, + }, + }) + diags = diags.Append(hclDiags) + + version, moreDiags := decodeProviderVersionArgument(addr, content.Attributes["version"]) + ret.version = version + diags = diags.Append(moreDiags) + + constraints, moreDiags := decodeProviderVersionConstraintsArgument(addr, content.Attributes["constraints"]) + ret.versionConstraints = constraints + diags = diags.Append(moreDiags) + + hashes, moreDiags := decodeProviderHashesArgument(addr, content.Attributes["hashes"]) + ret.hashes = hashes + diags = diags.Append(moreDiags) + + return ret, diags +} + +func decodeProviderVersionArgument(provider addrs.Provider, attr *hcl.Attribute) (getproviders.Version, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + if attr == nil { + // 
It's not okay to omit this argument, but the caller should already + // have generated diagnostics about that. + return getproviders.UnspecifiedVersion, diags + } + expr := attr.Expr + + var raw *string + hclDiags := gohcl.DecodeExpression(expr, nil, &raw) + diags = diags.Append(hclDiags) + if hclDiags.HasErrors() { + return getproviders.UnspecifiedVersion, diags + } + if raw == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing required argument", + Detail: "A provider lock block must contain a \"version\" argument.", + Subject: expr.Range().Ptr(), // the range for a missing argument's expression is the body's missing item range + }) + return getproviders.UnspecifiedVersion, diags + } + version, err := getproviders.ParseVersion(*raw) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider version number", + Detail: fmt.Sprintf("The selected version number for provider %s is invalid: %s.", provider, err), + Subject: expr.Range().Ptr(), + }) + } + if canon := version.String(); canon != *raw { + // Canonical forms are required in the lock file, to reduce the risk + // that a file diff will show changes that are entirely cosmetic. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider version number", + Detail: fmt.Sprintf("The selected version number for provider %s must be written in normalized form: %q.", provider, canon), + Subject: expr.Range().Ptr(), + }) + } + return version, diags +} + +func decodeProviderVersionConstraintsArgument(provider addrs.Provider, attr *hcl.Attribute) (getproviders.VersionConstraints, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + if attr == nil { + // It's okay to omit this argument. 
+ return nil, diags + } + expr := attr.Expr + + var raw string + hclDiags := gohcl.DecodeExpression(expr, nil, &raw) + diags = diags.Append(hclDiags) + if hclDiags.HasErrors() { + return nil, diags + } + constraints, err := getproviders.ParseVersionConstraints(raw) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider version constraints", + Detail: fmt.Sprintf("The recorded version constraints for provider %s are invalid: %s.", provider, err), + Subject: expr.Range().Ptr(), + }) + } + if canon := getproviders.VersionConstraintsString(constraints); canon != raw { + // Canonical forms are required in the lock file, to reduce the risk + // that a file diff will show changes that are entirely cosmetic. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider version constraints", + Detail: fmt.Sprintf("The recorded version constraints for provider %s must be written in normalized form: %q.", provider, canon), + Subject: expr.Range().Ptr(), + }) + } + + return constraints, diags +} + +func decodeProviderHashesArgument(provider addrs.Provider, attr *hcl.Attribute) ([]getproviders.Hash, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + if attr == nil { + // It's okay to omit this argument. + return nil, diags + } + expr := attr.Expr + + // We'll decode this argument using the HCL static analysis mode, because + // there's no reason for the hashes list to be dynamic and this way we can + // give more precise feedback on individual elements that are invalid, + // with direct source locations. 
+ hashExprs, hclDiags := hcl.ExprList(expr) + diags = diags.Append(hclDiags) + if hclDiags.HasErrors() { + return nil, diags + } + if len(hashExprs) == 0 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider hash set", + Detail: "The \"hashes\" argument must either be omitted or contain at least one hash value.", + Subject: expr.Range().Ptr(), + }) + return nil, diags + } + + ret := make([]getproviders.Hash, 0, len(hashExprs)) + for _, hashExpr := range hashExprs { + var raw string + hclDiags := gohcl.DecodeExpression(hashExpr, nil, &raw) + diags = diags.Append(hclDiags) + if hclDiags.HasErrors() { + continue + } + + hash, err := getproviders.ParseHash(raw) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider hash string", + Detail: fmt.Sprintf("Cannot interpret %q as a provider hash: %s.", raw, err), + Subject: expr.Range().Ptr(), + }) + continue + } + + ret = append(ret, hash) + } + + return ret, diags +} + +func encodeHashSetTokens(hashes []getproviders.Hash) hclwrite.Tokens { + // We'll generate the source code in a low-level way here (direct + // token manipulation) because it's desirable to maintain exactly + // the layout implemented here so that diffs against the locks + // file are easy to read; we don't want potential future changes to + // hclwrite to inadvertently introduce whitespace changes here. + ret := hclwrite.Tokens{ + { + Type: hclsyntax.TokenOBrack, + Bytes: []byte{'['}, + }, + { + Type: hclsyntax.TokenNewline, + Bytes: []byte{'\n'}, + }, + } + + // Although lock.hashes is a slice, we de-dupe and sort it on + // initialization so it's normalized for interpretation as a logical + // set, and so we can just trust it's already in a good order here. + for _, hash := range hashes { + hashVal := cty.StringVal(hash.String()) + ret = append(ret, hclwrite.TokensForValue(hashVal)...) 
+ ret = append(ret, hclwrite.Tokens{ + { + Type: hclsyntax.TokenComma, + Bytes: []byte{','}, + }, + { + Type: hclsyntax.TokenNewline, + Bytes: []byte{'\n'}, + }, + }...) + } + ret = append(ret, &hclwrite.Token{ + Type: hclsyntax.TokenCBrack, + Bytes: []byte{']'}, + }) + + return ret +} diff --git a/internal/terraform/depsfile/locks_file_test.go b/internal/terraform/depsfile/locks_file_test.go new file mode 100644 index 00000000..8209ada6 --- /dev/null +++ b/internal/terraform/depsfile/locks_file_test.go @@ -0,0 +1,274 @@ +package depsfile + +import ( + "bufio" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/camptocamp/terraboard/internal/terraform/addrs" + "github.com/camptocamp/terraboard/internal/terraform/getproviders" + "github.com/camptocamp/terraboard/internal/terraform/tfdiags" + "github.com/google/go-cmp/cmp" +) + +func TestLoadLocksFromFile(t *testing.T) { + // For ease of test maintenance we treat every file under + // test-data/locks-files as a test case which is subject + // at least to testing that it produces an expected set + // of diagnostics represented via specially-formatted comments + // in the fixture files (which might be the empty set, if + // there are no such comments). + // + // Some of the files also have additional assertions that + // are encoded in the test code below. These must pass + // in addition to the standard diagnostics tests, if present. 
+ files, err := ioutil.ReadDir("testdata/locks-files") + if err != nil { + t.Fatal(err.Error()) + } + + for _, info := range files { + testName := filepath.Base(info.Name()) + filename := filepath.Join("testdata/locks-files", testName) + t.Run(testName, func(t *testing.T) { + f, err := os.Open(filename) + if err != nil { + t.Fatal(err.Error()) + } + defer f.Close() + const errorPrefix = "# ERROR: " + const warningPrefix = "# WARNING: " + wantErrors := map[int]string{} + wantWarnings := map[int]string{} + sc := bufio.NewScanner(f) + lineNum := 1 + for sc.Scan() { + l := sc.Text() + if pos := strings.Index(l, errorPrefix); pos != -1 { + wantSummary := l[pos+len(errorPrefix):] + wantErrors[lineNum] = wantSummary + } + if pos := strings.Index(l, warningPrefix); pos != -1 { + wantSummary := l[pos+len(warningPrefix):] + wantWarnings[lineNum] = wantSummary + } + lineNum++ + } + if err := sc.Err(); err != nil { + t.Fatal(err.Error()) + } + + locks, diags := LoadLocksFromFile(filename) + gotErrors := map[int]string{} + gotWarnings := map[int]string{} + for _, diag := range diags { + summary := diag.Description().Summary + if diag.Source().Subject == nil { + // We don't expect any sourceless diagnostics here. + t.Errorf("unexpected sourceless diagnostic: %s", summary) + continue + } + lineNum := diag.Source().Subject.Start.Line + switch sev := diag.Severity(); sev { + case tfdiags.Error: + gotErrors[lineNum] = summary + case tfdiags.Warning: + gotWarnings[lineNum] = summary + default: + t.Errorf("unexpected diagnostic severity %s", sev) + } + } + + if diff := cmp.Diff(wantErrors, gotErrors); diff != "" { + t.Errorf("wrong errors\n%s", diff) + } + if diff := cmp.Diff(wantWarnings, gotWarnings); diff != "" { + t.Errorf("wrong warnings\n%s", diff) + } + + switch testName { + // These are the file-specific test assertions. 
Not all files + // need custom test assertions in addition to the standard + // diagnostics assertions implemented above, so the cases here + // don't need to be exhaustive for all files. + // + // Please keep these in alphabetical order so the list is easy + // to scan! + + case "empty.hcl": + if got, want := len(locks.providers), 0; got != want { + t.Errorf("wrong number of providers %d; want %d", got, want) + } + + case "valid-provider-locks.hcl": + if got, want := len(locks.providers), 3; got != want { + t.Errorf("wrong number of providers %d; want %d", got, want) + } + + t.Run("version-only", func(t *testing.T) { + if lock := locks.Provider(addrs.MustParseProviderSourceString("terraform.io/test/version-only")); lock != nil { + if got, want := lock.Version().String(), "1.0.0"; got != want { + t.Errorf("wrong version\ngot: %s\nwant: %s", got, want) + } + if got, want := getproviders.VersionConstraintsString(lock.VersionConstraints()), ""; got != want { + t.Errorf("wrong version constraints\ngot: %s\nwant: %s", got, want) + } + if got, want := len(lock.hashes), 0; got != want { + t.Errorf("wrong number of hashes %d; want %d", got, want) + } + } + }) + + t.Run("version-and-constraints", func(t *testing.T) { + if lock := locks.Provider(addrs.MustParseProviderSourceString("terraform.io/test/version-and-constraints")); lock != nil { + if got, want := lock.Version().String(), "1.2.0"; got != want { + t.Errorf("wrong version\ngot: %s\nwant: %s", got, want) + } + if got, want := getproviders.VersionConstraintsString(lock.VersionConstraints()), "~> 1.2"; got != want { + t.Errorf("wrong version constraints\ngot: %s\nwant: %s", got, want) + } + if got, want := len(lock.hashes), 0; got != want { + t.Errorf("wrong number of hashes %d; want %d", got, want) + } + } + }) + + t.Run("all-the-things", func(t *testing.T) { + if lock := locks.Provider(addrs.MustParseProviderSourceString("terraform.io/test/all-the-things")); lock != nil { + if got, want := lock.Version().String(), 
"3.0.10"; got != want { + t.Errorf("wrong version\ngot: %s\nwant: %s", got, want) + } + if got, want := getproviders.VersionConstraintsString(lock.VersionConstraints()), ">= 3.0.2"; got != want { + t.Errorf("wrong version constraints\ngot: %s\nwant: %s", got, want) + } + wantHashes := []getproviders.Hash{ + getproviders.MustParseHash("test:placeholder-hash-1"), + getproviders.MustParseHash("test:placeholder-hash-2"), + getproviders.MustParseHash("test:placeholder-hash-3"), + } + if diff := cmp.Diff(wantHashes, lock.hashes); diff != "" { + t.Errorf("wrong hashes\n%s", diff) + } + } + }) + } + }) + } +} + +func TestLoadLocksFromFileAbsent(t *testing.T) { + t.Run("lock file is a directory", func(t *testing.T) { + // This can never happen when Terraform is the one generating the + // lock file, but might arise if the user makes a directory with the + // lock file's name for some reason. (There is no actual reason to do + // so, so that would always be a mistake.) + locks, diags := LoadLocksFromFile("testdata") + if len(locks.providers) != 0 { + t.Errorf("returned locks has providers; expected empty locks") + } + if !diags.HasErrors() { + t.Fatalf("LoadLocksFromFile succeeded; want error") + } + // This is a generic error message from HCL itself, so upgrading HCL + // in future might cause a different error message here. + want := `Failed to read file: The configuration file "testdata" could not be read.` + got := diags.Err().Error() + if got != want { + t.Errorf("wrong error message\ngot: %s\nwant: %s", got, want) + } + }) + t.Run("lock file doesn't exist", func(t *testing.T) { + locks, diags := LoadLocksFromFile("testdata/nonexist.hcl") + if len(locks.providers) != 0 { + t.Errorf("returned locks has providers; expected empty locks") + } + if !diags.HasErrors() { + t.Fatalf("LoadLocksFromFile succeeded; want error") + } + // This is a generic error message from HCL itself, so upgrading HCL + // in future might cause a different error message here. 
+ want := `Failed to read file: The configuration file "testdata/nonexist.hcl" could not be read.` + got := diags.Err().Error() + if got != want { + t.Errorf("wrong error message\ngot: %s\nwant: %s", got, want) + } + }) +} + +func TestSaveLocksToFile(t *testing.T) { + locks := NewLocks() + + fooProvider := addrs.MustParseProviderSourceString("test/foo") + barProvider := addrs.MustParseProviderSourceString("test/bar") + bazProvider := addrs.MustParseProviderSourceString("test/baz") + booProvider := addrs.MustParseProviderSourceString("test/boo") + oneDotOh := getproviders.MustParseVersion("1.0.0") + oneDotTwo := getproviders.MustParseVersion("1.2.0") + atLeastOneDotOh := getproviders.MustParseVersionConstraints(">= 1.0.0") + pessimisticOneDotOh := getproviders.MustParseVersionConstraints("~> 1") + abbreviatedOneDotTwo := getproviders.MustParseVersionConstraints("1.2") + hashes := []getproviders.Hash{ + getproviders.MustParseHash("test:cccccccccccccccccccccccccccccccccccccccccccccccc"), + getproviders.MustParseHash("test:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"), + getproviders.MustParseHash("test:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + } + locks.SetProvider(fooProvider, oneDotOh, atLeastOneDotOh, hashes) + locks.SetProvider(barProvider, oneDotTwo, pessimisticOneDotOh, nil) + locks.SetProvider(bazProvider, oneDotTwo, nil, nil) + locks.SetProvider(booProvider, oneDotTwo, abbreviatedOneDotTwo, nil) + + dir := t.TempDir() + + filename := filepath.Join(dir, LockFilePath) + diags := SaveLocksToFile(locks, filename) + if diags.HasErrors() { + t.Fatalf("unexpected errors\n%s", diags.Err().Error()) + } + + fileInfo, err := os.Stat(filename) + if err != nil { + t.Fatalf(err.Error()) + } + if mode := fileInfo.Mode(); mode&0111 != 0 { + t.Fatalf("Expected lock file to be non-executable: %o", mode) + } + + gotContentBytes, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatalf(err.Error()) + } + gotContent := string(gotContentBytes) + wantContent := 
`# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/test/bar" { + version = "1.2.0" + constraints = "~> 1.0" +} + +provider "registry.terraform.io/test/baz" { + version = "1.2.0" +} + +provider "registry.terraform.io/test/boo" { + version = "1.2.0" + constraints = "1.2.0" +} + +provider "registry.terraform.io/test/foo" { + version = "1.0.0" + constraints = ">= 1.0.0" + hashes = [ + "test:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "test:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + "test:cccccccccccccccccccccccccccccccccccccccccccccccc", + ] +} +` + if diff := cmp.Diff(wantContent, gotContent); diff != "" { + t.Errorf("wrong result\n%s", diff) + } +} diff --git a/internal/terraform/depsfile/locks_test.go b/internal/terraform/depsfile/locks_test.go new file mode 100644 index 00000000..f5283651 --- /dev/null +++ b/internal/terraform/depsfile/locks_test.go @@ -0,0 +1,218 @@ +package depsfile + +import ( + "testing" + + "github.com/camptocamp/terraboard/internal/terraform/addrs" + "github.com/camptocamp/terraboard/internal/terraform/getproviders" + "github.com/google/go-cmp/cmp" +) + +func TestLocksEqual(t *testing.T) { + boopProvider := addrs.NewDefaultProvider("boop") + v2 := getproviders.MustParseVersion("2.0.0") + v2LocalBuild := getproviders.MustParseVersion("2.0.0+awesomecorp.1") + v2GtConstraints := getproviders.MustParseVersionConstraints(">= 2.0.0") + v2EqConstraints := getproviders.MustParseVersionConstraints("2.0.0") + hash1 := getproviders.HashScheme("test").New("1") + hash2 := getproviders.HashScheme("test").New("2") + hash3 := getproviders.HashScheme("test").New("3") + + equalBothWays := func(t *testing.T, a, b *Locks) { + t.Helper() + if !a.Equal(b) { + t.Errorf("a should be equal to b") + } + if !b.Equal(a) { + t.Errorf("b should be equal to a") + } + } + nonEqualBothWays := func(t *testing.T, a, b *Locks) { + t.Helper() + if a.Equal(b) { + 
t.Errorf("a should be equal to b") + } + if b.Equal(a) { + t.Errorf("b should be equal to a") + } + } + + t.Run("both empty", func(t *testing.T) { + a := NewLocks() + b := NewLocks() + equalBothWays(t, a, b) + }) + t.Run("an extra provider lock", func(t *testing.T) { + a := NewLocks() + b := NewLocks() + b.SetProvider(boopProvider, v2, v2GtConstraints, nil) + nonEqualBothWays(t, a, b) + }) + t.Run("both have boop provider with same version", func(t *testing.T) { + a := NewLocks() + b := NewLocks() + // Note: the constraints are not part of the definition of "Equal", so they can differ + a.SetProvider(boopProvider, v2, v2GtConstraints, nil) + b.SetProvider(boopProvider, v2, v2EqConstraints, nil) + equalBothWays(t, a, b) + }) + t.Run("both have boop provider with different versions", func(t *testing.T) { + a := NewLocks() + b := NewLocks() + a.SetProvider(boopProvider, v2, v2EqConstraints, nil) + b.SetProvider(boopProvider, v2LocalBuild, v2EqConstraints, nil) + nonEqualBothWays(t, a, b) + }) + t.Run("both have boop provider with same version and same hashes", func(t *testing.T) { + a := NewLocks() + b := NewLocks() + hashes := []getproviders.Hash{hash1, hash2, hash3} + a.SetProvider(boopProvider, v2, v2EqConstraints, hashes) + b.SetProvider(boopProvider, v2, v2EqConstraints, hashes) + equalBothWays(t, a, b) + }) + t.Run("both have boop provider with same version but different hashes", func(t *testing.T) { + a := NewLocks() + b := NewLocks() + hashesA := []getproviders.Hash{hash1, hash2} + hashesB := []getproviders.Hash{hash1, hash3} + a.SetProvider(boopProvider, v2, v2EqConstraints, hashesA) + b.SetProvider(boopProvider, v2, v2EqConstraints, hashesB) + nonEqualBothWays(t, a, b) + }) +} + +func TestLocksEqualProviderAddress(t *testing.T) { + boopProvider := addrs.NewDefaultProvider("boop") + v2 := getproviders.MustParseVersion("2.0.0") + v2LocalBuild := getproviders.MustParseVersion("2.0.0+awesomecorp.1") + v2GtConstraints := 
getproviders.MustParseVersionConstraints(">= 2.0.0") + v2EqConstraints := getproviders.MustParseVersionConstraints("2.0.0") + hash1 := getproviders.HashScheme("test").New("1") + hash2 := getproviders.HashScheme("test").New("2") + hash3 := getproviders.HashScheme("test").New("3") + + equalProviderAddressBothWays := func(t *testing.T, a, b *Locks) { + t.Helper() + if !a.EqualProviderAddress(b) { + t.Errorf("a should be equal to b") + } + if !b.EqualProviderAddress(a) { + t.Errorf("b should be equal to a") + } + } + nonEqualProviderAddressBothWays := func(t *testing.T, a, b *Locks) { + t.Helper() + if a.EqualProviderAddress(b) { + t.Errorf("a should be equal to b") + } + if b.EqualProviderAddress(a) { + t.Errorf("b should be equal to a") + } + } + + t.Run("both empty", func(t *testing.T) { + a := NewLocks() + b := NewLocks() + equalProviderAddressBothWays(t, a, b) + }) + t.Run("an extra provider lock", func(t *testing.T) { + a := NewLocks() + b := NewLocks() + b.SetProvider(boopProvider, v2, v2GtConstraints, nil) + nonEqualProviderAddressBothWays(t, a, b) + }) + t.Run("both have boop provider with different versions", func(t *testing.T) { + a := NewLocks() + b := NewLocks() + a.SetProvider(boopProvider, v2, v2EqConstraints, nil) + b.SetProvider(boopProvider, v2LocalBuild, v2EqConstraints, nil) + equalProviderAddressBothWays(t, a, b) + }) + t.Run("both have boop provider with same version but different hashes", func(t *testing.T) { + a := NewLocks() + b := NewLocks() + hashesA := []getproviders.Hash{hash1, hash2} + hashesB := []getproviders.Hash{hash1, hash3} + a.SetProvider(boopProvider, v2, v2EqConstraints, hashesA) + b.SetProvider(boopProvider, v2, v2EqConstraints, hashesB) + equalProviderAddressBothWays(t, a, b) + }) +} + +func TestLocksProviderSetRemove(t *testing.T) { + beepProvider := addrs.NewDefaultProvider("beep") + boopProvider := addrs.NewDefaultProvider("boop") + v2 := getproviders.MustParseVersion("2.0.0") + v2EqConstraints := 
getproviders.MustParseVersionConstraints("2.0.0") + v2GtConstraints := getproviders.MustParseVersionConstraints(">= 2.0.0") + hash := getproviders.HashScheme("test").New("1") + + locks := NewLocks() + if got, want := len(locks.AllProviders()), 0; got != want { + t.Fatalf("fresh locks object already has providers") + } + + locks.SetProvider(boopProvider, v2, v2EqConstraints, []getproviders.Hash{hash}) + { + got := locks.AllProviders() + want := map[addrs.Provider]*ProviderLock{ + boopProvider: { + addr: boopProvider, + version: v2, + versionConstraints: v2EqConstraints, + hashes: []getproviders.Hash{hash}, + }, + } + if diff := cmp.Diff(want, got, ProviderLockComparer); diff != "" { + t.Fatalf("wrong providers after SetProvider boop\n%s", diff) + } + } + + locks.SetProvider(beepProvider, v2, v2GtConstraints, []getproviders.Hash{hash}) + { + got := locks.AllProviders() + want := map[addrs.Provider]*ProviderLock{ + boopProvider: { + addr: boopProvider, + version: v2, + versionConstraints: v2EqConstraints, + hashes: []getproviders.Hash{hash}, + }, + beepProvider: { + addr: beepProvider, + version: v2, + versionConstraints: v2GtConstraints, + hashes: []getproviders.Hash{hash}, + }, + } + if diff := cmp.Diff(want, got, ProviderLockComparer); diff != "" { + t.Fatalf("wrong providers after SetProvider beep\n%s", diff) + } + } + + locks.RemoveProvider(boopProvider) + { + got := locks.AllProviders() + want := map[addrs.Provider]*ProviderLock{ + beepProvider: { + addr: beepProvider, + version: v2, + versionConstraints: v2GtConstraints, + hashes: []getproviders.Hash{hash}, + }, + } + if diff := cmp.Diff(want, got, ProviderLockComparer); diff != "" { + t.Fatalf("wrong providers after RemoveProvider boop\n%s", diff) + } + } + + locks.RemoveProvider(beepProvider) + { + got := locks.AllProviders() + want := map[addrs.Provider]*ProviderLock{} + if diff := cmp.Diff(want, got, ProviderLockComparer); diff != "" { + t.Fatalf("wrong providers after RemoveProvider beep\n%s", diff) + } + 
} +} diff --git a/internal/terraform/depsfile/paths.go b/internal/terraform/depsfile/paths.go new file mode 100644 index 00000000..252f67e2 --- /dev/null +++ b/internal/terraform/depsfile/paths.go @@ -0,0 +1,18 @@ +package depsfile + +// LockFilePath is the path, relative to a configuration's root module +// directory, where Terraform expects to find the dependency lock file for +// that configuration. +// +// This file is intended to be kept in version control, so it lives directly +// in the root module directory. The ".terraform" prefix is intended to +// suggest that it's metadata about several types of objects that ultimately +// end up in the .terraform directory after running "terraform init". +const LockFilePath = ".terraform.lock.hcl" + +// DevOverrideFilePath is the path, relative to a configuration's root module +// directory, where Terraform will look to find a possible override file that +// represents a request to temporarily (within a single working directory only) +// use specific local directories in place of packages that would normally +// need to be installed from a remote location. +const DevOverrideFilePath = ".terraform/dev-overrides.hcl" diff --git a/internal/terraform/depsfile/testdata/locks-files/empty.hcl b/internal/terraform/depsfile/testdata/locks-files/empty.hcl new file mode 100644 index 00000000..66169c2e --- /dev/null +++ b/internal/terraform/depsfile/testdata/locks-files/empty.hcl @@ -0,0 +1 @@ +# An empty locks file is a bit of a degenerate case, but it is valid. 
diff --git a/internal/terraform/depsfile/testdata/locks-files/invalid-provider-addrs.hcl b/internal/terraform/depsfile/testdata/locks-files/invalid-provider-addrs.hcl new file mode 100644 index 00000000..d5d2cdbb --- /dev/null +++ b/internal/terraform/depsfile/testdata/locks-files/invalid-provider-addrs.hcl @@ -0,0 +1,44 @@ +provider "" { # ERROR: Invalid provider source address + +} + +provider "hashicorp/aws" { # ERROR: Non-normalized provider source address + +} + +provider "aws" { # ERROR: Non-normalized provider source address + +} + +provider "too/many/parts/here" { # ERROR: Invalid provider source address + +} + +provider "Registry.terraform.io/example/example" { # ERROR: Non-normalized provider source address + +} + +provider "registry.terraform.io/eXample/example" { # ERROR: Non-normalized provider source address + +} + +provider "registry.terraform.io/example/Example" { # ERROR: Non-normalized provider source address + +} + +provider "this/one/okay" { + version = "1.0.0" +} + +provider "this/one/okay" { # ERROR: Duplicate provider lock +} + +# Legacy providers are not allowed, because they existed only to +# support the Terraform 0.13 upgrade process. +provider "registry.terraform.io/-/null" { # ERROR: Invalid provider source address +} + +# Built-in providers are not allowed, because they are not versioned +# independently of the Terraform CLI release they are embedded in. 
+provider "terraform.io/builtin/foo" { # ERROR: Invalid provider source address +} diff --git a/internal/terraform/depsfile/testdata/locks-files/invalid-versions.hcl b/internal/terraform/depsfile/testdata/locks-files/invalid-versions.hcl new file mode 100644 index 00000000..d1e96474 --- /dev/null +++ b/internal/terraform/depsfile/testdata/locks-files/invalid-versions.hcl @@ -0,0 +1,30 @@ +provider "terraform.io/test/foo" { + version = "" # ERROR: Invalid provider version number +} + +provider "terraform.io/test/bar" { + # The "v" prefix is not expected here + version = "v1.0.0" # ERROR: Invalid provider version number +} + +provider "terraform.io/test/baz" { + # Must be written in the canonical form, with three parts + version = "1.0" # ERROR: Invalid provider version number +} + +provider "terraform.io/test/boop" { + # Must be written in the canonical form, with three parts + version = "1" # ERROR: Invalid provider version number +} + +provider "terraform.io/test/blep" { + # Mustn't use redundant extra zero padding + version = "1.02" # ERROR: Invalid provider version number +} + +provider "terraform.io/test/huzzah" { # ERROR: Missing required argument +} + +provider "terraform.io/test/huzznot" { + version = null # ERROR: Missing required argument +} diff --git a/internal/terraform/depsfile/testdata/locks-files/unsupported-block.hcl b/internal/terraform/depsfile/testdata/locks-files/unsupported-block.hcl new file mode 100644 index 00000000..41321fca --- /dev/null +++ b/internal/terraform/depsfile/testdata/locks-files/unsupported-block.hcl @@ -0,0 +1,2 @@ +doodad "blah" { # ERROR: Unsupported block type +} diff --git a/internal/terraform/depsfile/testdata/locks-files/valid-provider-locks.hcl b/internal/terraform/depsfile/testdata/locks-files/valid-provider-locks.hcl new file mode 100644 index 00000000..1d191160 --- /dev/null +++ b/internal/terraform/depsfile/testdata/locks-files/valid-provider-locks.hcl @@ -0,0 +1,20 @@ + +provider "terraform.io/test/version-only" { 
+ version = "1.0.0" +} + +provider "terraform.io/test/version-and-constraints" { + version = "1.2.0" + constraints = "~> 1.2" +} + +provider "terraform.io/test/all-the-things" { + version = "3.0.10" + constraints = ">= 3.0.2" + + hashes = [ + "test:placeholder-hash-1", + "test:placeholder-hash-2", + "test:placeholder-hash-3", + ] +} diff --git a/internal/terraform/depsfile/testing.go b/internal/terraform/depsfile/testing.go new file mode 100644 index 00000000..cf8965be --- /dev/null +++ b/internal/terraform/depsfile/testing.go @@ -0,0 +1,22 @@ +package depsfile + +import ( + "github.com/google/go-cmp/cmp" +) + +// ProviderLockComparer is an option for github.com/google/go-cmp/cmp that +// specifies how to compare values of type depsfile.ProviderLock. +// +// Use this, rather than crafting comparison options yourself, in case the +// comparison strategy needs to change in future due to implementation details +// of the ProviderLock type. +var ProviderLockComparer cmp.Option + +func init() { + // For now, direct comparison of the unexported fields is good enough + // because we store everything in a normalized form. If that changes + // later then we might need to write a custom transformer to a hidden + // type with exported fields, so we can retain the ability for cmp to + // still report differences deeply. 
+ ProviderLockComparer = cmp.AllowUnexported(ProviderLock{}) +} diff --git a/internal/terraform/experiments/experiment.go b/internal/terraform/experiments/experiment.go index ee73ca23..b5d67f58 100644 --- a/internal/terraform/experiments/experiment.go +++ b/internal/terraform/experiments/experiment.go @@ -16,6 +16,8 @@ const ( VariableValidation = Experiment("variable_validation") ModuleVariableOptionalAttrs = Experiment("module_variable_optional_attrs") SuppressProviderSensitiveAttrs = Experiment("provider_sensitive_attrs") + ConfigDrivenMove = Experiment("config_driven_move") + PreconditionsPostconditions = Experiment("preconditions_postconditions") ) func init() { @@ -23,6 +25,8 @@ func init() { // a current or a concluded experiment. registerConcludedExperiment(VariableValidation, "Custom variable validation can now be used by default, without enabling an experiment.") registerConcludedExperiment(SuppressProviderSensitiveAttrs, "Provider-defined sensitive attributes are now redacted by default, without enabling an experiment.") + registerConcludedExperiment(ConfigDrivenMove, "Declarations of moved resource instances using \"moved\" blocks can now be used by default, without enabling an experiment.") + registerConcludedExperiment(PreconditionsPostconditions, "Condition blocks can now be used by default, without enabling an experiment.") registerCurrentExperiment(ModuleVariableOptionalAttrs) } diff --git a/internal/terraform/getmodules/doc.go b/internal/terraform/getmodules/doc.go new file mode 100644 index 00000000..9c729984 --- /dev/null +++ b/internal/terraform/getmodules/doc.go @@ -0,0 +1,8 @@ +// Package getmodules contains the low-level functionality for fetching +// remote module packages. It's essentially just a thin wrapper around +// go-getter. +// +// This package is only for remote module source addresses, not for local +// or registry source addresses. The other address types are handled +// elsewhere. 
+package getmodules diff --git a/internal/terraform/getmodules/file_detector.go b/internal/terraform/getmodules/file_detector.go new file mode 100644 index 00000000..b28e2cdc --- /dev/null +++ b/internal/terraform/getmodules/file_detector.go @@ -0,0 +1,65 @@ +package getmodules + +import ( + "fmt" + "path/filepath" + "runtime" +) + +// fileDetector is a replacement for go-getter's own file detector which +// better meets Terraform's needs: specifically, it rejects relative filesystem +// paths with a somewhat-decent error message. +// +// This is a replacement for some historical hackery we did where we tried to +// avoid calling into go-getter altogether in this situation. This is, +// therefore, a copy of getter.FileDetector but with the "not absolute path" +// case replaced with a similar result as Terraform's old heuristic would've +// returned: a custom error type that the caller can react to in order to +// produce a hint error message if desired. +type fileDetector struct{} + +func (d *fileDetector) Detect(src, pwd string) (string, bool, error) { + if len(src) == 0 { + return "", false, nil + } + + if !filepath.IsAbs(src) { + return "", true, &MaybeRelativePathErr{src} + } + + return fmtFileURL(src), true, nil +} + +func fmtFileURL(path string) string { + if runtime.GOOS == "windows" { + // Make sure we're using "/" on Windows. URLs are "/"-based. + path = filepath.ToSlash(path) + return fmt.Sprintf("file://%s", path) + } + + // Make sure that we don't start with "/" since we add that below. + if path[0] == '/' { + path = path[1:] + } + return fmt.Sprintf("file:///%s", path) +} + +// MaybeRelativePathErr is the error type returned by NormalizePackageAddress +// if the source address looks like it might be intended to be a relative +// filesystem path but without the required "./" or "../" prefix. +// +// Specifically, NormalizePackageAddress will return a pointer to this type, +// so the error type will be *MaybeRelativePathErr. 
+// +// It has a name starting with "Maybe" because in practice we can get here +// with any string that isn't recognized as one of the supported schemes: +// treating the address as a local filesystem path is our fallback for when +// everything else fails, but it could just as easily be a typo in an attempt +// to use one of the other schemes and thus not a filesystem path at all. +type MaybeRelativePathErr struct { + Addr string +} + +func (e *MaybeRelativePathErr) Error() string { + return fmt.Sprintf("Terraform cannot detect a supported external module source type for %s", e.Addr) +} diff --git a/internal/terraform/getmodules/getter.go b/internal/terraform/getmodules/getter.go new file mode 100644 index 00000000..40e230aa --- /dev/null +++ b/internal/terraform/getmodules/getter.go @@ -0,0 +1,163 @@ +package getmodules + +import ( + "context" + "fmt" + "log" + "os" + + "github.com/camptocamp/terraboard/internal/terraform/copy" + cleanhttp "github.com/hashicorp/go-cleanhttp" + getter "github.com/hashicorp/go-getter" +) + +// We configure our own go-getter detector and getter sets here, because +// the set of sources we support is part of Terraform's documentation and +// so we don't want any new sources introduced in go-getter to sneak in here +// and work even though they aren't documented. This also insulates us from +// any meddling that might be done by other go-getter callers linked into our +// executable. +// +// Note that over time we've found go-getter's design to be not wholly fit +// for Terraform's purposes in various ways, and so we're continuing to use +// it here because our backward compatibility with earlier versions depends +// on it, but we use go-getter very carefully and always only indirectly via +// the public API of this package so that we can get the subset of the +// go-getter functionality we need while working around some of the less +// helpful parts of its design. 
See the comments in various other functions +// in this package which call into go-getter for more information on what +// tradeoffs we're making here. + +var goGetterDetectors = []getter.Detector{ + new(getter.GitHubDetector), + new(getter.GitDetector), + + // Because historically BitBucket supported both Git and Mercurial + // repositories but used the same repository URL syntax for both, + // this detector takes the unusual step of actually reaching out + // to the BitBucket API to recognize the repository type. That + // means there's the possibility of an outgoing network request + // inside what is otherwise normally just a local string manipulation + // operation, but we continue to accept this for now. + // + // Perhaps a future version of go-getter will remove the check now + // that BitBucket only supports Git anyway. Aside from this historical + // exception, we should avoid adding any new detectors that make network + // requests in here, and limit ourselves only to ones that can operate + // entirely through local string manipulation. 
+ new(getter.BitBucketDetector), + + new(getter.GCSDetector), + new(getter.S3Detector), + new(fileDetector), +} + +var goGetterNoDetectors = []getter.Detector{} + +var goGetterDecompressors = map[string]getter.Decompressor{ + "bz2": new(getter.Bzip2Decompressor), + "gz": new(getter.GzipDecompressor), + "xz": new(getter.XzDecompressor), + "zip": new(getter.ZipDecompressor), + + "tar.bz2": new(getter.TarBzip2Decompressor), + "tar.tbz2": new(getter.TarBzip2Decompressor), + + "tar.gz": new(getter.TarGzipDecompressor), + "tgz": new(getter.TarGzipDecompressor), + + "tar.xz": new(getter.TarXzDecompressor), + "txz": new(getter.TarXzDecompressor), +} + +var goGetterGetters = map[string]getter.Getter{ + "file": new(getter.FileGetter), + "gcs": new(getter.GCSGetter), + "git": new(getter.GitGetter), + "hg": new(getter.HgGetter), + "s3": new(getter.S3Getter), + "http": getterHTTPGetter, + "https": getterHTTPGetter, +} + +var getterHTTPClient = cleanhttp.DefaultClient() + +var getterHTTPGetter = &getter.HttpGetter{ + Client: getterHTTPClient, + Netrc: true, +} + +// A reusingGetter is a helper for the module installer that remembers +// the final resolved addresses of all of the sources it has already been +// asked to install, and will copy from a prior installation directory if +// it has the same resolved source address. +// +// The keys in a reusingGetter are the normalized (post-detection) package +// addresses, and the values are the paths where each source was previously +// installed. (Users of this map should treat the keys as addrs.ModulePackage +// values, but we can't type them that way because the addrs package +// imports getmodules in order to indirectly access our go-getter +// configuration.) +type reusingGetter map[string]string + +// getWithGoGetter fetches the package at the given address into the given +// target directory. The given address must already be in normalized form +// (using NormalizePackageAddress) or else the behavior is undefined. 
+// +// This function deals only in entire packages, so it's always the caller's +// responsibility to handle any subdirectory specification and select a +// suitable subdirectory of the given installation directory after installation +// has succeeded. +// +// This function would ideally accept packageAddr as a value of type +// addrs.ModulePackage, but we can't do that because the addrs package +// depends on this package for package address parsing. Therefore we just +// use a string here but assume that the caller got that value by calling +// the String method on a valid addrs.ModulePackage value. +// +// The errors returned by this function are those surfaced by the underlying +// go-getter library, which have very inconsistent quality as +// end-user-actionable error messages. At this time we do not have any +// reasonable way to improve these error messages at this layer because +// the underlying errors are not separately recognizable. +func (g reusingGetter) getWithGoGetter(ctx context.Context, instPath, packageAddr string) error { + var err error + + if prevDir, exists := g[packageAddr]; exists { + log.Printf("[TRACE] getmodules: copying previous install of %q from %s to %s", packageAddr, prevDir, instPath) + err := os.Mkdir(instPath, os.ModePerm) + if err != nil { + return fmt.Errorf("failed to create directory %s: %s", instPath, err) + } + err = copy.CopyDir(instPath, prevDir) + if err != nil { + return fmt.Errorf("failed to copy from %s to %s: %s", prevDir, instPath, err) + } + } else { + log.Printf("[TRACE] getmodules: fetching %q to %q", packageAddr, instPath) + client := getter.Client{ + Src: packageAddr, + Dst: instPath, + Pwd: instPath, + + Mode: getter.ClientModeDir, + + Detectors: goGetterNoDetectors, // our caller should've already done detection + Decompressors: goGetterDecompressors, + Getters: goGetterGetters, + Ctx: ctx, + } + err = client.Get() + if err != nil { + return err + } + // Remember where we installed this so we might reuse this 
directory + // on subsequent calls to avoid re-downloading. + g[packageAddr] = instPath + } + + // If we get down here then we've either downloaded the package or + // copied a previous tree we downloaded, and so either way we should + // have got the full module package structure written into instPath. + return nil +} diff --git a/internal/terraform/getmodules/installer.go b/internal/terraform/getmodules/installer.go new file mode 100644 index 00000000..657c0e73 --- /dev/null +++ b/internal/terraform/getmodules/installer.go @@ -0,0 +1,44 @@ +package getmodules + +import ( + "context" +) + +// PackageFetcher is a low-level utility for fetching remote module packages +// into local filesystem directories in preparation for use by higher-level +// module installer functionality implemented elsewhere. +// +// A PackageFetcher works only with entire module packages and never with +// the individual modules within a package. +// +// A particular PackageFetcher instance remembers the target directory of +// any successfully-installed package so that it can optimize future calls +// that have the same package address by copying the local directory tree, +// rather than fetching the package from its origin repeatedly. There is +// no way to reset this cache, so a particular PackageFetcher instance should +// live only for the duration of a single initialization process. +type PackageFetcher struct { + getter reusingGetter +} + +func NewPackageFetcher() *PackageFetcher { + return &PackageFetcher{ + getter: reusingGetter{}, + } +} + +// FetchPackage downloads or otherwise retrieves the filesystem inside the +// package at the given address into the given local installation directory. +// +// packageAddr must be formatted as if it were the result of an +// addrs.ModulePackage.String() call. It's only defined as a raw string here +// because the getmodules package can't import the addrs package due to +// that creating a package dependency cycle. 
+// +// PackageFetcher only works with entire packages. If the caller is processing +// a module source address which includes a subdirectory portion then the +// caller must resolve that itself, possibly with the help of the +// getmodules.SplitPackageSubdir and getmodules.ExpandSubdirGlobs functions. +func (f *PackageFetcher) FetchPackage(ctx context.Context, instDir string, packageAddr string) error { + return f.getter.getWithGoGetter(ctx, instDir, packageAddr) +} diff --git a/internal/terraform/getmodules/package.go b/internal/terraform/getmodules/package.go new file mode 100644 index 00000000..a4606562 --- /dev/null +++ b/internal/terraform/getmodules/package.go @@ -0,0 +1,69 @@ +package getmodules + +import ( + getter "github.com/hashicorp/go-getter" +) + +// NormalizePackageAddress uses the go-getter "detector" functionality in +// order to turn a user-supplied source address into a normalized address +// which always includes a prefix naming a protocol to fetch with and may +// also include a transformed/normalized version of the protocol-specific +// source address included afterward. +// +// This is part of the implementation of addrs.ParseModulePackage and of +// addrs.ParseModuleSource, so for most callers it'd be better to call +// one of those other functions instead. The addrs package can potentially +// perform other processing in addition to just the go-getter detection. +// +// Note that this function expects to receive only a package address, not +// a full source address that might also include a subdirectory portion. +// The caller must trim off any subdirectory portion using +// getmodules.SplitPackageSubdir before calling this function, passing in +// just the packageAddr return value, or the result will be incorrect. +// +// The detectors in go-getter can potentially introduce their own +// package subdirectory portions. 
If that happens then this function will +// return the subdirectory portion as a non-empty subDir return value, +// which the caller must then use as a prefix for any subDir it already +// extracted from the user's given package address. +// +// Some of go-getter's detectors make outgoing HTTP requests, and so +// the behavior of this function may depend on the network connectivity +// of the system where Terraform is running. However, most of the getters +// we use are local-only, and so HTTP requests are only for some ambiguous +// edge-cases, such as the BitBucket detector which has a mechanism to +// detect whether to use Git or Mercurial, because earlier versions of +// BitBucket used to support both. +func NormalizePackageAddress(given string) (packageAddr, subDir string, err error) { + // Because we're passing go-getter no base directory here, the file + // detector will return an error if the user entered a relative filesystem + // path without a "../" or "./" prefix and thus ended up in here. + // + // go-getter's error message for that case is very poor, and so we'll + // try to heuristically detect that situation and return a better error + // message. + + // NOTE: We're passing an empty string to the "current working directory" + // here because that's only relevant for relative filesystem paths, + // but Terraform handles relative filesystem paths itself outside of + // go-getter and so it'd always be an error to pass one into here. + // go-getter's "file" detector returns an error if it encounters a + // relative path when the pwd argument is empty. + // + // (Absolute filesystem paths _are_ valid though, for annoying historical + // reasons, and we treat them as remote packages even though "downloading" + // them just means a recursive copy of the source directory tree.) 
+ + result, err := getter.Detect(given, "", goGetterDetectors) + if err != nil { + // NOTE: go-getter's error messages are of very inconsistent quality + // and many are not suitable for an end-user audience, but they are all + // just strings and so we can't really do any sort of post-processing + // to improve them and thus we just accept some bad error messages for + // now. + return "", "", err + } + + packageAddr, subDir = SplitPackageSubdir(result) + return packageAddr, subDir, nil +} diff --git a/internal/terraform/getmodules/subdir.go b/internal/terraform/getmodules/subdir.go new file mode 100644 index 00000000..38d398f7 --- /dev/null +++ b/internal/terraform/getmodules/subdir.go @@ -0,0 +1,57 @@ +package getmodules + +import ( + "path" + + getter "github.com/hashicorp/go-getter" +) + +// SplitPackageSubdir detects whether the given address string has a +// subdirectory portion, and if so returns a non-empty subDir string +// along with the trimmed package address. +// +// If the given string doesn't have a subdirectory portion then it'll +// just be returned verbatim in packageAddr, with an empty subDir value. +// +// Although the rest of this package is focused only on direct remote +// module packages, this particular function and its companion +// ExpandSubdirGlobs are both also relevant for registry-based module +// addresses, because a registry translates such an address into a +// remote module package address and thus can contribute its own +// additions to the final subdirectory selection. +func SplitPackageSubdir(given string) (packageAddr, subDir string) { + // We delegate this mostly to go-getter, because older Terraform + // versions just used go-getter directly and so we need to preserve + // its various quirks for compatibility reasons. 
+ // + // However, note that in Terraform we _always_ split off the subdirectory + // portion and handle it within Terraform-level code, _never_ passing + // a subdirectory portion down into go-getter's own Get function, because + // Terraform's ability to refer between local paths inside the same + // package depends on Terraform itself always being aware of where the + // package's root directory ended up on disk, and always needs the + // package installed wholesale. + packageAddr, subDir = getter.SourceDirSubdir(given) + if subDir != "" { + subDir = path.Clean(subDir) + } + return packageAddr, subDir +} + +// ExpandSubdirGlobs handles a subdir string that might contain glob syntax, +// turning it into a concrete subdirectory path by referring to the actual +// files on disk in the given directory which we assume contains the content +// of whichever package this is a subdirectory glob for. +// +// Subdir globs are used, for example, when a module registry wants to specify +// to select the contents of the single directory at the root of a conventional +// tar archive but it doesn't actually know the exact name of that directory. +// In that case it might specify a subdir of just "*", which this function +// will then expand into the single subdirectory found inside instDir, or +// return an error if the result would be ambiguous. +func ExpandSubdirGlobs(instDir string, subDir string) (string, error) { + // We just delegate this entirely to go-getter, because older Terraform + // versions just used go-getter directly and so we need to preserve + // its various quirks for compatibility reasons. 
+ return getter.SubdirGlob(instDir, subDir) +} diff --git a/internal/terraform/getproviders/filesystem_search.go b/internal/terraform/getproviders/filesystem_search.go index 8214f9f9..42286c46 100644 --- a/internal/terraform/getproviders/filesystem_search.go +++ b/internal/terraform/getproviders/filesystem_search.go @@ -105,7 +105,7 @@ func SearchLocalDirectory(baseDir string) (map[addrs.Provider]PackageMetaList, e } var providerAddr addrs.Provider if namespace == addrs.LegacyProviderNamespace { - if hostname != addrs.DefaultRegistryHost { + if hostname != addrs.DefaultProviderRegistryHost { log.Printf("[WARN] local provider path %q indicates a legacy provider not on the default registry host; ignoring", fullPath) return nil } diff --git a/internal/terraform/getproviders/hanging_source.go b/internal/terraform/getproviders/hanging_source.go new file mode 100644 index 00000000..4a5facb2 --- /dev/null +++ b/internal/terraform/getproviders/hanging_source.go @@ -0,0 +1,29 @@ +package getproviders + +import ( + "context" + + "github.com/camptocamp/terraboard/internal/terraform/addrs" +) + +// HangingSource is an implementation of Source which hangs until the given +// context is cancelled. This is useful only for unit tests of user-controlled +// cancels. 
+type HangingSource struct { +} + +var _ Source = (*HangingSource)(nil) + +func (s *HangingSource) AvailableVersions(ctx context.Context, provider addrs.Provider) (VersionList, Warnings, error) { + <-ctx.Done() + return nil, nil, nil +} + +func (s *HangingSource) PackageMeta(ctx context.Context, provider addrs.Provider, version Version, target Platform) (PackageMeta, error) { + <-ctx.Done() + return PackageMeta{}, nil +} + +func (s *HangingSource) ForDisplay(provider addrs.Provider) string { + return "hanging source" +} diff --git a/internal/terraform/getproviders/multi_source.go b/internal/terraform/getproviders/multi_source.go index 8f1abdf9..f94dd727 100644 --- a/internal/terraform/getproviders/multi_source.go +++ b/internal/terraform/getproviders/multi_source.go @@ -229,7 +229,7 @@ const Wildcard string = "*" // We'll read the default registry host from over in the addrs package, to // avoid duplicating it. A "default" provider uses the default registry host // by definition. -var defaultRegistryHost = addrs.DefaultRegistryHost +var defaultRegistryHost = addrs.DefaultProviderRegistryHost func normalizeProviderNameOrWildcard(s string) (string, error) { if s == Wildcard { diff --git a/internal/terraform/getproviders/package_authentication.go b/internal/terraform/getproviders/package_authentication.go index 32e907de..4ebb8da6 100644 --- a/internal/terraform/getproviders/package_authentication.go +++ b/internal/terraform/getproviders/package_authentication.go @@ -9,6 +9,9 @@ import ( "log" "strings" + // TODO: replace crypto/openpgp since it is deprecated + // https://github.com/golang/go/issues/44226 + //lint:file-ignore SA1019 openpgp is deprecated but there are no good alternatives yet "golang.org/x/crypto/openpgp" openpgpArmor "golang.org/x/crypto/openpgp/armor" openpgpErrors "golang.org/x/crypto/openpgp/errors" diff --git a/internal/terraform/getproviders/package_authentication_test.go b/internal/terraform/getproviders/package_authentication_test.go index 
06b7621e..45052b75 100644 --- a/internal/terraform/getproviders/package_authentication_test.go +++ b/internal/terraform/getproviders/package_authentication_test.go @@ -9,6 +9,10 @@ import ( "testing" "github.com/google/go-cmp/cmp" + + // TODO: replace crypto/openpgp since it is deprecated + // https://github.com/golang/go/issues/44226 + //lint:file-ignore SA1019 openpgp is deprecated but there are no good alternatives yet "golang.org/x/crypto/openpgp" ) diff --git a/internal/terraform/getproviders/registry_client.go b/internal/terraform/getproviders/registry_client.go index 57fa94fe..2c00577f 100644 --- a/internal/terraform/getproviders/registry_client.go +++ b/internal/terraform/getproviders/registry_client.go @@ -55,7 +55,7 @@ func init() { configureRequestTimeout() } -var SupportedPluginProtocols = MustParseVersionConstraints("~> 5") +var SupportedPluginProtocols = MustParseVersionConstraints(">= 5, <7") // registryClient is a client for the provider registry protocol that is // specialized only for the needs of this package. It's not intended as a @@ -437,7 +437,7 @@ func (c *registryClient) getFile(url *url.URL) ([]byte, error) { defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("%s", resp.Status) + return nil, fmt.Errorf("%s returned from %s", resp.Status, resp.Request.Host) } data, err := ioutil.ReadAll(resp.Body) @@ -478,7 +478,7 @@ func maxRetryErrorHandler(resp *http.Response, err error, numTries int) (*http.R // both response and error. 
var errMsg string if resp != nil { - errMsg = fmt.Sprintf(": %d", resp.StatusCode) + errMsg = fmt.Sprintf(": %s returned from %s", resp.Status, resp.Request.Host) } else if err != nil { errMsg = fmt.Sprintf(": %s", err) } diff --git a/internal/terraform/getproviders/registry_client_test.go b/internal/terraform/getproviders/registry_client_test.go index 45ec540d..331f826d 100644 --- a/internal/terraform/getproviders/registry_client_test.go +++ b/internal/terraform/getproviders/registry_client_test.go @@ -218,6 +218,10 @@ func fakeRegistryHandler(resp http.ResponseWriter, req *http.Request) { resp.Header().Set("Content-Type", "application/json") resp.WriteHeader(200) resp.Write([]byte(`{"versions":[{"version":"1.0.0","protocols":["0.1"]}]}`)) + case "weaksauce/protocol-six": + resp.Header().Set("Content-Type", "application/json") + resp.WriteHeader(200) + resp.Write([]byte(`{"versions":[{"version":"1.0.0","protocols":["6.0"]}]}`)) case "weaksauce/no-versions": resp.Header().Set("Content-Type", "application/json") resp.WriteHeader(200) @@ -412,6 +416,12 @@ func TestFindClosestProtocolCompatibleVersion(t *testing.T) { versions.Unspecified, ``, }, + "provider protocol six": { + addrs.MustParseProviderSourceString("example.com/weaksauce/protocol-six"), + MustParseVersion("1.0.0"), + MustParseVersion("1.0.0"), + ``, + }, } for name, test := range tests { t.Run(name, func(t *testing.T) { diff --git a/internal/terraform/getproviders/types.go b/internal/terraform/getproviders/types.go index 5e6ce26f..eb23b05c 100644 --- a/internal/terraform/getproviders/types.go +++ b/internal/terraform/getproviders/types.go @@ -137,12 +137,12 @@ func (p Platform) LessThan(other Platform) bool { // ParsePlatform parses a string representation of a platform, like // "linux_amd64", or returns an error if the string is not valid. 
func ParsePlatform(str string) (Platform, error) { - underPos := strings.Index(str, "_") - if underPos < 1 || underPos >= len(str)-2 { + parts := strings.Split(str, "_") + if len(parts) != 2 { return Platform{}, fmt.Errorf("must be two words separated by an underscore") } - os, arch := str[:underPos], str[underPos+1:] + os, arch := parts[0], parts[1] if strings.ContainsAny(os, " \t\n\r") { return Platform{}, fmt.Errorf("OS portion must not contain whitespace") } diff --git a/internal/terraform/getproviders/types_test.go b/internal/terraform/getproviders/types_test.go index b12cc215..0008793a 100644 --- a/internal/terraform/getproviders/types_test.go +++ b/internal/terraform/getproviders/types_test.go @@ -94,3 +94,48 @@ func TestVersionConstraintsString(t *testing.T) { }) } } + +func TestParsePlatform(t *testing.T) { + tests := []struct { + Input string + Want Platform + Err bool + }{ + { + "", + Platform{}, + true, + }, + { + "too_many_notes", + Platform{}, + true, + }, + { + "extra _ whitespaces ", + Platform{}, + true, + }, + { + "arbitrary_os", + Platform{OS: "arbitrary", Arch: "os"}, + false, + }, + } + + for _, test := range tests { + got, err := ParsePlatform(test.Input) + if err != nil { + if test.Err == false { + t.Errorf("unexpected error: %s", err.Error()) + } + } else { + if test.Err { + t.Errorf("wrong result: expected error, got none") + } + } + if got != test.Want { + t.Errorf("wrong\n got: %q\nwant: %q", got, test.Want) + } + } +} diff --git a/internal/terraform/instances/expander.go b/internal/terraform/instances/expander.go new file mode 100644 index 00000000..ee3d85c7 --- /dev/null +++ b/internal/terraform/instances/expander.go @@ -0,0 +1,527 @@ +package instances + +import ( + "fmt" + "sort" + "sync" + + "github.com/camptocamp/terraboard/internal/terraform/addrs" + "github.com/zclconf/go-cty/cty" +) + +// Expander instances serve as a coordination point for gathering object +// repetition values (count and for_each in configuration) and then 
later +// making use of them to fully enumerate all of the instances of an object. +// +// The two repeatable object types in Terraform are modules and resources. +// Because resources belong to modules and modules can nest inside other +// modules, module expansion in particular has a recursive effect that can +// cause deep objects to expand exponentially. Expander assumes that all +// instances of a module have the same static objects inside, and that they +// differ only in the repetition count for some of those objects. +// +// Expander is a synchronized object whose methods can be safely called +// from concurrent threads of execution. However, it does expect a certain +// sequence of operations which is normally obtained by the caller traversing +// a dependency graph: each object must have its repetition mode set exactly +// once, and this must be done before any calls that depend on the repetition +// mode. In other words, the count or for_each expression value for a module +// must be provided before any object nested directly or indirectly inside +// that module can be expanded. If this ordering is violated, the methods +// will panic to enforce internal consistency. +// +// The Expand* methods of Expander only work directly with modules and with +// resources. Addresses for other objects that nest within modules but +// do not themselves support repetition can be obtained by calling ExpandModule +// with the containing module path and then producing one absolute instance +// address per module instance address returned. +type Expander struct { + mu sync.RWMutex + exps *expanderModule +} + +// NewExpander initializes and returns a new Expander, empty and ready to use. +func NewExpander() *Expander { + return &Expander{ + exps: newExpanderModule(), + } +} + +// SetModuleSingle records that the given module call inside the given parent +// module does not use any repetition arguments and is therefore a singleton. 
+func (e *Expander) SetModuleSingle(parentAddr addrs.ModuleInstance, callAddr addrs.ModuleCall) { + e.setModuleExpansion(parentAddr, callAddr, expansionSingleVal) +} + +// SetModuleCount records that the given module call inside the given parent +// module instance uses the "count" repetition argument, with the given value. +func (e *Expander) SetModuleCount(parentAddr addrs.ModuleInstance, callAddr addrs.ModuleCall, count int) { + e.setModuleExpansion(parentAddr, callAddr, expansionCount(count)) +} + +// SetModuleForEach records that the given module call inside the given parent +// module instance uses the "for_each" repetition argument, with the given +// map value. +// +// In the configuration language the for_each argument can also accept a set. +// It's the caller's responsibility to convert that into an identity map before +// calling this method. +func (e *Expander) SetModuleForEach(parentAddr addrs.ModuleInstance, callAddr addrs.ModuleCall, mapping map[string]cty.Value) { + e.setModuleExpansion(parentAddr, callAddr, expansionForEach(mapping)) +} + +// SetResourceSingle records that the given resource inside the given module +// does not use any repetition arguments and is therefore a singleton. +func (e *Expander) SetResourceSingle(moduleAddr addrs.ModuleInstance, resourceAddr addrs.Resource) { + e.setResourceExpansion(moduleAddr, resourceAddr, expansionSingleVal) +} + +// SetResourceCount records that the given resource inside the given module +// uses the "count" repetition argument, with the given value. +func (e *Expander) SetResourceCount(moduleAddr addrs.ModuleInstance, resourceAddr addrs.Resource, count int) { + e.setResourceExpansion(moduleAddr, resourceAddr, expansionCount(count)) +} + +// SetResourceForEach records that the given resource inside the given module +// uses the "for_each" repetition argument, with the given map value. +// +// In the configuration language the for_each argument can also accept a set. 
+// It's the caller's responsibility to convert that into an identity map before +// calling this method. +func (e *Expander) SetResourceForEach(moduleAddr addrs.ModuleInstance, resourceAddr addrs.Resource, mapping map[string]cty.Value) { + e.setResourceExpansion(moduleAddr, resourceAddr, expansionForEach(mapping)) +} + +// ExpandModule finds the exhaustive set of module instances resulting from +// the expansion of the given module and all of its ancestor modules. +// +// All of the modules on the path to the identified module must already have +// had their expansion registered using one of the SetModule* methods before +// calling, or this method will panic. +func (e *Expander) ExpandModule(addr addrs.Module) []addrs.ModuleInstance { + return e.expandModule(addr, false) +} + +// expandModule allows skipping unexpanded module addresses by setting skipUnknown to true. +// This is used by instances.Set, which is only concerned with the expanded +// instances, and should not panic when looking up unknown addresses. +func (e *Expander) expandModule(addr addrs.Module, skipUnknown bool) []addrs.ModuleInstance { + if len(addr) == 0 { + // Root module is always a singleton. + return singletonRootModule + } + + e.mu.RLock() + defer e.mu.RUnlock() + + // We're going to be dynamically growing ModuleInstance addresses, so + // we'll preallocate some space to do it so that for typical shallow + // module trees we won't need to reallocate this. + // (moduleInstances does plenty of allocations itself, so the benefit of + // pre-allocating this is marginal but it's not hard to do.) 
+ parentAddr := make(addrs.ModuleInstance, 0, 4) + ret := e.exps.moduleInstances(addr, parentAddr, skipUnknown) + sort.SliceStable(ret, func(i, j int) bool { + return ret[i].Less(ret[j]) + }) + return ret +} + +// GetDeepestExistingModuleInstance is a funny specialized function for +// determining how many steps we can traverse through the given module instance +// address before encountering an undeclared instance of a declared module. +// +// The result is the longest prefix of the given address which steps only +// through module instances that exist. +// +// All of the modules on the given path must already have had their +// expansion registered using one of the SetModule* methods before calling, +// or this method will panic. +func (e *Expander) GetDeepestExistingModuleInstance(given addrs.ModuleInstance) addrs.ModuleInstance { + exps := e.exps // start with the root module expansions + for i := 0; i < len(given); i++ { + step := given[i] + callName := step.Name + if _, ok := exps.moduleCalls[addrs.ModuleCall{Name: callName}]; !ok { + // This is a bug in the caller, because it should always register + // expansions for an object and all of its ancestors before requesting + // expansion of it. + panic(fmt.Sprintf("no expansion has been registered for %s", given[:i].Child(callName, addrs.NoKey))) + } + + var ok bool + exps, ok = exps.childInstances[step] + if !ok { + // We've found a non-existing instance, so we're done. + return given[:i] + } + } + + // If we complete the loop above without returning early then the entire + // given address refers to a declared module instance. + return given +} + +// ExpandModuleResource finds the exhaustive set of resource instances resulting from +// the expansion of the given resource and all of its containing modules. 
+// +// All of the modules on the path to the identified resource and the resource +// itself must already have had their expansion registered using one of the +// SetModule*/SetResource* methods before calling, or this method will panic. +func (e *Expander) ExpandModuleResource(moduleAddr addrs.Module, resourceAddr addrs.Resource) []addrs.AbsResourceInstance { + e.mu.RLock() + defer e.mu.RUnlock() + + // We're going to be dynamically growing ModuleInstance addresses, so + // we'll preallocate some space to do it so that for typical shallow + // module trees we won't need to reallocate this. + // (moduleInstances does plenty of allocations itself, so the benefit of + // pre-allocating this is marginal but it's not hard to do.) + moduleInstanceAddr := make(addrs.ModuleInstance, 0, 4) + ret := e.exps.moduleResourceInstances(moduleAddr, resourceAddr, moduleInstanceAddr) + sort.SliceStable(ret, func(i, j int) bool { + return ret[i].Less(ret[j]) + }) + return ret +} + +// ExpandResource finds the set of resource instances resulting from +// the expansion of the given resource within its module instance. +// +// All of the modules on the path to the identified resource and the resource +// itself must already have had their expansion registered using one of the +// SetModule*/SetResource* methods before calling, or this method will panic. +// +// ExpandModuleResource returns all instances of a resource across all +// instances of its containing module, whereas this ExpandResource function +// is more specific and only expands within a single module instance. If +// any of the module instances selected in the module path of the given address +// aren't valid for that module's expansion then ExpandResource returns an +// empty result, reflecting that a non-existing module instance can never +// contain any existing resource instances. 
+func (e *Expander) ExpandResource(resourceAddr addrs.AbsResource) []addrs.AbsResourceInstance { + e.mu.RLock() + defer e.mu.RUnlock() + + moduleInstanceAddr := make(addrs.ModuleInstance, 0, 4) + ret := e.exps.resourceInstances(resourceAddr.Module, resourceAddr.Resource, moduleInstanceAddr) + sort.SliceStable(ret, func(i, j int) bool { + return ret[i].Less(ret[j]) + }) + return ret +} + +// GetModuleInstanceRepetitionData returns an object describing the values +// that should be available for each.key, each.value, and count.index within +// the call block for the given module instance. +func (e *Expander) GetModuleInstanceRepetitionData(addr addrs.ModuleInstance) RepetitionData { + if len(addr) == 0 { + // The root module is always a singleton, so it has no repetition data. + return RepetitionData{} + } + + e.mu.RLock() + defer e.mu.RUnlock() + + parentMod := e.findModule(addr[:len(addr)-1]) + lastStep := addr[len(addr)-1] + exp, ok := parentMod.moduleCalls[addrs.ModuleCall{Name: lastStep.Name}] + if !ok { + panic(fmt.Sprintf("no expansion has been registered for %s", addr)) + } + return exp.repetitionData(lastStep.InstanceKey) +} + +// GetResourceInstanceRepetitionData returns an object describing the values +// that should be available for each.key, each.value, and count.index within +// the definition block for the given resource instance. +func (e *Expander) GetResourceInstanceRepetitionData(addr addrs.AbsResourceInstance) RepetitionData { + e.mu.RLock() + defer e.mu.RUnlock() + + parentMod := e.findModule(addr.Module) + exp, ok := parentMod.resources[addr.Resource.Resource] + if !ok { + panic(fmt.Sprintf("no expansion has been registered for %s", addr.ContainingResource())) + } + return exp.repetitionData(addr.Resource.Key) +} + +// AllInstances returns a set of all of the module and resource instances known +// to the expander. 
+// +// It generally doesn't make sense to call this until everything has already +// been fully expanded by calling the SetModule* and SetResource* functions. +// After that, the returned set is a convenient small API only for querying +// whether particular instance addresses appeared as a result of those +// expansions. +func (e *Expander) AllInstances() Set { + return Set{e} +} + +func (e *Expander) findModule(moduleInstAddr addrs.ModuleInstance) *expanderModule { + // We expect that all of the modules on the path to our module instance + // should already have expansions registered. + mod := e.exps + for i, step := range moduleInstAddr { + next, ok := mod.childInstances[step] + if !ok { + // Top-down ordering of registration is part of the contract of + // Expander, so this is always indicative of a bug in the caller. + panic(fmt.Sprintf("no expansion has been registered for ancestor module %s", moduleInstAddr[:i+1])) + } + mod = next + } + return mod +} + +func (e *Expander) setModuleExpansion(parentAddr addrs.ModuleInstance, callAddr addrs.ModuleCall, exp expansion) { + e.mu.Lock() + defer e.mu.Unlock() + + mod := e.findModule(parentAddr) + if _, exists := mod.moduleCalls[callAddr]; exists { + panic(fmt.Sprintf("expansion already registered for %s", parentAddr.Child(callAddr.Name, addrs.NoKey))) + } + // We'll also pre-register the child instances so that later calls can + // populate them as the caller traverses the configuration tree. 
+ for _, key := range exp.instanceKeys() { + step := addrs.ModuleInstanceStep{Name: callAddr.Name, InstanceKey: key} + mod.childInstances[step] = newExpanderModule() + } + mod.moduleCalls[callAddr] = exp +} + +func (e *Expander) setResourceExpansion(parentAddr addrs.ModuleInstance, resourceAddr addrs.Resource, exp expansion) { + e.mu.Lock() + defer e.mu.Unlock() + + mod := e.findModule(parentAddr) + if _, exists := mod.resources[resourceAddr]; exists { + panic(fmt.Sprintf("expansion already registered for %s", resourceAddr.Absolute(parentAddr))) + } + mod.resources[resourceAddr] = exp +} + +func (e *Expander) knowsModuleInstance(want addrs.ModuleInstance) bool { + if want.IsRoot() { + return true // root module instance is always present + } + + e.mu.Lock() + defer e.mu.Unlock() + + return e.exps.knowsModuleInstance(want) +} + +func (e *Expander) knowsModuleCall(want addrs.AbsModuleCall) bool { + e.mu.Lock() + defer e.mu.Unlock() + + return e.exps.knowsModuleCall(want) +} + +func (e *Expander) knowsResourceInstance(want addrs.AbsResourceInstance) bool { + e.mu.Lock() + defer e.mu.Unlock() + + return e.exps.knowsResourceInstance(want) +} + +func (e *Expander) knowsResource(want addrs.AbsResource) bool { + e.mu.Lock() + defer e.mu.Unlock() + + return e.exps.knowsResource(want) +} + +type expanderModule struct { + moduleCalls map[addrs.ModuleCall]expansion + resources map[addrs.Resource]expansion + childInstances map[addrs.ModuleInstanceStep]*expanderModule +} + +func newExpanderModule() *expanderModule { + return &expanderModule{ + moduleCalls: make(map[addrs.ModuleCall]expansion), + resources: make(map[addrs.Resource]expansion), + childInstances: make(map[addrs.ModuleInstanceStep]*expanderModule), + } +} + +var singletonRootModule = []addrs.ModuleInstance{addrs.RootModuleInstance} + +// if moduleInstances is being used to lookup known instances after all +// expansions have been done, set skipUnknown to true which allows addrs which +// may not have been seen to 
return with no instances rather than panicking. +func (m *expanderModule) moduleInstances(addr addrs.Module, parentAddr addrs.ModuleInstance, skipUnknown bool) []addrs.ModuleInstance { + callName := addr[0] + exp, ok := m.moduleCalls[addrs.ModuleCall{Name: callName}] + if !ok { + if skipUnknown { + return nil + } + // This is a bug in the caller, because it should always register + // expansions for an object and all of its ancestors before requesting + // expansion of it. + panic(fmt.Sprintf("no expansion has been registered for %s", parentAddr.Child(callName, addrs.NoKey))) + } + + var ret []addrs.ModuleInstance + + // If there's more than one step remaining then we need to traverse deeper. + if len(addr) > 1 { + for step, inst := range m.childInstances { + if step.Name != callName { + continue + } + instAddr := append(parentAddr, step) + ret = append(ret, inst.moduleInstances(addr[1:], instAddr, skipUnknown)...) + } + return ret + } + + // Otherwise, we'll use the expansion from the final step to produce + // a sequence of addresses under this prefix. + for _, k := range exp.instanceKeys() { + // We're reusing the buffer under parentAddr as we recurse through + // the structure, so we need to copy it here to produce a final + // immutable slice to return. + full := make(addrs.ModuleInstance, 0, len(parentAddr)+1) + full = append(full, parentAddr...) + full = full.Child(callName, k) + ret = append(ret, full) + } + return ret +} + +func (m *expanderModule) moduleResourceInstances(moduleAddr addrs.Module, resourceAddr addrs.Resource, parentAddr addrs.ModuleInstance) []addrs.AbsResourceInstance { + if len(moduleAddr) > 0 { + var ret []addrs.AbsResourceInstance + // We need to traverse through the module levels first, so we can + // then iterate resource expansions in the context of each module + // path leading to them. 
+ callName := moduleAddr[0] + if _, ok := m.moduleCalls[addrs.ModuleCall{Name: callName}]; !ok { + // This is a bug in the caller, because it should always register + // expansions for an object and all of its ancestors before requesting + // expansion of it. + panic(fmt.Sprintf("no expansion has been registered for %s", parentAddr.Child(callName, addrs.NoKey))) + } + + for step, inst := range m.childInstances { + if step.Name != callName { + continue + } + moduleInstAddr := append(parentAddr, step) + ret = append(ret, inst.moduleResourceInstances(moduleAddr[1:], resourceAddr, moduleInstAddr)...) + } + return ret + } + + return m.onlyResourceInstances(resourceAddr, parentAddr) +} + +func (m *expanderModule) resourceInstances(moduleAddr addrs.ModuleInstance, resourceAddr addrs.Resource, parentAddr addrs.ModuleInstance) []addrs.AbsResourceInstance { + if len(moduleAddr) > 0 { + // We need to traverse through the module levels first, using only the + // module instances for our specific resource, as the resource may not + // yet be expanded in all module instances. + step := moduleAddr[0] + callName := step.Name + if _, ok := m.moduleCalls[addrs.ModuleCall{Name: callName}]; !ok { + // This is a bug in the caller, because it should always register + // expansions for an object and all of its ancestors before requesting + // expansion of it. + panic(fmt.Sprintf("no expansion has been registered for %s", parentAddr.Child(callName, addrs.NoKey))) + } + + if inst, ok := m.childInstances[step]; ok { + moduleInstAddr := append(parentAddr, step) + return inst.resourceInstances(moduleAddr[1:], resourceAddr, moduleInstAddr) + } else { + // If we have the module _call_ registered (as we checked above) + // but we don't have the given module _instance_ registered, that + // suggests that the module instance key in "step" is not declared + // by the current definition of this module call. 
That means the + // module instance doesn't exist at all, and therefore it can't + // possibly declare any resource instances either. + // + // For example, if we were asked about module.foo[0].aws_instance.bar + // but module.foo doesn't currently have count set, then there is no + // module.foo[0] at all, and therefore no aws_instance.bar + // instances inside it. + return nil + } + } + return m.onlyResourceInstances(resourceAddr, parentAddr) +} + +func (m *expanderModule) onlyResourceInstances(resourceAddr addrs.Resource, parentAddr addrs.ModuleInstance) []addrs.AbsResourceInstance { + var ret []addrs.AbsResourceInstance + exp, ok := m.resources[resourceAddr] + if !ok { + panic(fmt.Sprintf("no expansion has been registered for %s", resourceAddr.Absolute(parentAddr))) + } + + for _, k := range exp.instanceKeys() { + // We're reusing the buffer under parentAddr as we recurse through + // the structure, so we need to copy it here to produce a final + // immutable slice to return. + moduleAddr := make(addrs.ModuleInstance, len(parentAddr)) + copy(moduleAddr, parentAddr) + ret = append(ret, resourceAddr.Instance(k).Absolute(moduleAddr)) + } + return ret +} + +func (m *expanderModule) getModuleInstance(want addrs.ModuleInstance) *expanderModule { + current := m + for _, step := range want { + next := current.childInstances[step] + if next == nil { + return nil + } + current = next + } + return current +} + +func (m *expanderModule) knowsModuleInstance(want addrs.ModuleInstance) bool { + return m.getModuleInstance(want) != nil +} + +func (m *expanderModule) knowsModuleCall(want addrs.AbsModuleCall) bool { + modInst := m.getModuleInstance(want.Module) + if modInst == nil { + return false + } + _, ret := modInst.moduleCalls[want.Call] + return ret +} + +func (m *expanderModule) knowsResourceInstance(want addrs.AbsResourceInstance) bool { + modInst := m.getModuleInstance(want.Module) + if modInst == nil { + return false + } + resourceExp := 
modInst.resources[want.Resource.Resource] + if resourceExp == nil { + return false + } + for _, key := range resourceExp.instanceKeys() { + if key == want.Resource.Key { + return true + } + } + return false +} + +func (m *expanderModule) knowsResource(want addrs.AbsResource) bool { + modInst := m.getModuleInstance(want.Module) + if modInst == nil { + return false + } + _, ret := modInst.resources[want.Resource] + return ret +} diff --git a/internal/terraform/instances/expander_test.go b/internal/terraform/instances/expander_test.go new file mode 100644 index 00000000..f5f016d4 --- /dev/null +++ b/internal/terraform/instances/expander_test.go @@ -0,0 +1,534 @@ +package instances + +import ( + "fmt" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/zclconf/go-cty/cty" + + "github.com/camptocamp/terraboard/internal/terraform/addrs" +) + +func TestExpander(t *testing.T) { + // Some module and resource addresses and values we'll use repeatedly below. + singleModuleAddr := addrs.ModuleCall{Name: "single"} + count2ModuleAddr := addrs.ModuleCall{Name: "count2"} + count0ModuleAddr := addrs.ModuleCall{Name: "count0"} + forEachModuleAddr := addrs.ModuleCall{Name: "for_each"} + singleResourceAddr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "single", + } + count2ResourceAddr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "count2", + } + count0ResourceAddr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "count0", + } + forEachResourceAddr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "for_each", + } + eachMap := map[string]cty.Value{ + "a": cty.NumberIntVal(1), + "b": cty.NumberIntVal(2), + } + + // In normal use, Expander would be called in the context of a graph + // traversal to ensure that information is registered/requested in the + // correct sequence, but to keep this test self-contained we'll just + // manually write out 
the steps here. + // + // The steps below are assuming a configuration tree like the following: + // - root module + // - resource test.single with no count or for_each + // - resource test.count2 with count = 2 + // - resource test.count0 with count = 0 + // - resource test.for_each with for_each = { a = 1, b = 2 } + // - child module "single" with no count or for_each + // - resource test.single with no count or for_each + // - resource test.count2 with count = 2 + // - child module "count2" with count = 2 + // - resource test.single with no count or for_each + // - resource test.count2 with count = 2 + // - child module "count2" with count = 2 + // - resource test.count2 with count = 2 + // - child module "count0" with count = 0 + // - resource test.single with no count or for_each + // - child module for_each with for_each = { a = 1, b = 2 } + // - resource test.single with no count or for_each + // - resource test.count2 with count = 2 + + ex := NewExpander() + + // We don't register the root module, because it's always implied to exist. + // + // Below we're going to use braces and indentation just to help visually + // reflect the tree structure from the tree in the above comment, in the + // hope that the following is easier to follow. + // + // The Expander API requires that we register containing modules before + // registering anything inside them, so we'll work through the above + // in a depth-first order in the registration steps that follow. 
+ { + ex.SetResourceSingle(addrs.RootModuleInstance, singleResourceAddr) + ex.SetResourceCount(addrs.RootModuleInstance, count2ResourceAddr, 2) + ex.SetResourceCount(addrs.RootModuleInstance, count0ResourceAddr, 0) + ex.SetResourceForEach(addrs.RootModuleInstance, forEachResourceAddr, eachMap) + + ex.SetModuleSingle(addrs.RootModuleInstance, singleModuleAddr) + { + // The single instance of the module + moduleInstanceAddr := addrs.RootModuleInstance.Child("single", addrs.NoKey) + ex.SetResourceSingle(moduleInstanceAddr, singleResourceAddr) + ex.SetResourceCount(moduleInstanceAddr, count2ResourceAddr, 2) + } + + ex.SetModuleCount(addrs.RootModuleInstance, count2ModuleAddr, 2) + for i1 := 0; i1 < 2; i1++ { + moduleInstanceAddr := addrs.RootModuleInstance.Child("count2", addrs.IntKey(i1)) + ex.SetResourceSingle(moduleInstanceAddr, singleResourceAddr) + ex.SetResourceCount(moduleInstanceAddr, count2ResourceAddr, 2) + ex.SetModuleCount(moduleInstanceAddr, count2ModuleAddr, 2) + for i2 := 0; i2 < 2; i2++ { + moduleInstanceAddr := moduleInstanceAddr.Child("count2", addrs.IntKey(i2)) + ex.SetResourceCount(moduleInstanceAddr, count2ResourceAddr, 2) + } + } + + ex.SetModuleCount(addrs.RootModuleInstance, count0ModuleAddr, 0) + { + // There are no instances of module "count0", so our nested module + // would never actually get registered here: the expansion node + // for the resource would see that its containing module has no + // instances and so do nothing. + } + + ex.SetModuleForEach(addrs.RootModuleInstance, forEachModuleAddr, eachMap) + for k := range eachMap { + moduleInstanceAddr := addrs.RootModuleInstance.Child("for_each", addrs.StringKey(k)) + ex.SetResourceSingle(moduleInstanceAddr, singleResourceAddr) + ex.SetResourceCount(moduleInstanceAddr, count2ResourceAddr, 2) + } + } + + t.Run("root module", func(t *testing.T) { + // Requesting expansion of the root module doesn't really mean anything + // since it's always a singleton, but for consistency it should work. 
+ got := ex.ExpandModule(addrs.RootModule) + want := []addrs.ModuleInstance{addrs.RootModuleInstance} + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("resource single", func(t *testing.T) { + got := ex.ExpandModuleResource( + addrs.RootModule, + singleResourceAddr, + ) + want := []addrs.AbsResourceInstance{ + mustAbsResourceInstanceAddr(`test.single`), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("resource count2", func(t *testing.T) { + got := ex.ExpandModuleResource( + addrs.RootModule, + count2ResourceAddr, + ) + want := []addrs.AbsResourceInstance{ + mustAbsResourceInstanceAddr(`test.count2[0]`), + mustAbsResourceInstanceAddr(`test.count2[1]`), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("resource count0", func(t *testing.T) { + got := ex.ExpandModuleResource( + addrs.RootModule, + count0ResourceAddr, + ) + want := []addrs.AbsResourceInstance(nil) + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("resource for_each", func(t *testing.T) { + got := ex.ExpandModuleResource( + addrs.RootModule, + forEachResourceAddr, + ) + want := []addrs.AbsResourceInstance{ + mustAbsResourceInstanceAddr(`test.for_each["a"]`), + mustAbsResourceInstanceAddr(`test.for_each["b"]`), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("module single", func(t *testing.T) { + got := ex.ExpandModule(addrs.RootModule.Child("single")) + want := []addrs.ModuleInstance{ + mustModuleInstanceAddr(`module.single`), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("module single resource single", func(t *testing.T) { + got := ex.ExpandModuleResource( + mustModuleAddr("single"), + singleResourceAddr, + ) + want := []addrs.AbsResourceInstance{ + 
mustAbsResourceInstanceAddr("module.single.test.single"), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("module single resource count2", func(t *testing.T) { + // Two different ways of asking the same question, which should + // both produce the same result. + // First: nested expansion of all instances of the resource across + // all instances of the module, but it's a single-instance module + // so the first level is a singleton. + got1 := ex.ExpandModuleResource( + mustModuleAddr(`single`), + count2ResourceAddr, + ) + // Second: expansion of only instances belonging to a specific + // instance of the module, but again it's a single-instance module + // so there's only one to ask about. + got2 := ex.ExpandResource( + count2ResourceAddr.Absolute( + addrs.RootModuleInstance.Child("single", addrs.NoKey), + ), + ) + want := []addrs.AbsResourceInstance{ + mustAbsResourceInstanceAddr(`module.single.test.count2[0]`), + mustAbsResourceInstanceAddr(`module.single.test.count2[1]`), + } + if diff := cmp.Diff(want, got1); diff != "" { + t.Errorf("wrong ExpandModuleResource result\n%s", diff) + } + if diff := cmp.Diff(want, got2); diff != "" { + t.Errorf("wrong ExpandResource result\n%s", diff) + } + }) + t.Run("module single resource count2 with non-existing module instance", func(t *testing.T) { + got := ex.ExpandResource( + count2ResourceAddr.Absolute( + // Note: This is intentionally an invalid instance key, + // so we're asking about module.single[1].test.count2 + // even though module.single doesn't have count set and + // therefore there is no module.single[1]. + addrs.RootModuleInstance.Child("single", addrs.IntKey(1)), + ), + ) + // If the containing module instance doesn't exist then it can't + // possibly have any resource instances inside it. 
+ want := ([]addrs.AbsResourceInstance)(nil) + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("module count2", func(t *testing.T) { + got := ex.ExpandModule(mustModuleAddr(`count2`)) + want := []addrs.ModuleInstance{ + mustModuleInstanceAddr(`module.count2[0]`), + mustModuleInstanceAddr(`module.count2[1]`), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("module count2 resource single", func(t *testing.T) { + got := ex.ExpandModuleResource( + mustModuleAddr(`count2`), + singleResourceAddr, + ) + want := []addrs.AbsResourceInstance{ + mustAbsResourceInstanceAddr(`module.count2[0].test.single`), + mustAbsResourceInstanceAddr(`module.count2[1].test.single`), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("module count2 resource count2", func(t *testing.T) { + got := ex.ExpandModuleResource( + mustModuleAddr(`count2`), + count2ResourceAddr, + ) + want := []addrs.AbsResourceInstance{ + mustAbsResourceInstanceAddr(`module.count2[0].test.count2[0]`), + mustAbsResourceInstanceAddr(`module.count2[0].test.count2[1]`), + mustAbsResourceInstanceAddr(`module.count2[1].test.count2[0]`), + mustAbsResourceInstanceAddr(`module.count2[1].test.count2[1]`), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("module count2 module count2", func(t *testing.T) { + got := ex.ExpandModule(mustModuleAddr(`count2.count2`)) + want := []addrs.ModuleInstance{ + mustModuleInstanceAddr(`module.count2[0].module.count2[0]`), + mustModuleInstanceAddr(`module.count2[0].module.count2[1]`), + mustModuleInstanceAddr(`module.count2[1].module.count2[0]`), + mustModuleInstanceAddr(`module.count2[1].module.count2[1]`), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("module count2 module count2 GetDeepestExistingModuleInstance", func(t 
*testing.T) { + t.Run("first step invalid", func(t *testing.T) { + got := ex.GetDeepestExistingModuleInstance(mustModuleInstanceAddr(`module.count2["nope"].module.count2[0]`)) + want := addrs.RootModuleInstance + if !want.Equal(got) { + t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) + } + }) + t.Run("second step invalid", func(t *testing.T) { + got := ex.GetDeepestExistingModuleInstance(mustModuleInstanceAddr(`module.count2[1].module.count2`)) + want := mustModuleInstanceAddr(`module.count2[1]`) + if !want.Equal(got) { + t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) + } + }) + t.Run("neither step valid", func(t *testing.T) { + got := ex.GetDeepestExistingModuleInstance(mustModuleInstanceAddr(`module.count2.module.count2["nope"]`)) + want := addrs.RootModuleInstance + if !want.Equal(got) { + t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) + } + }) + t.Run("both steps valid", func(t *testing.T) { + got := ex.GetDeepestExistingModuleInstance(mustModuleInstanceAddr(`module.count2[1].module.count2[0]`)) + want := mustModuleInstanceAddr(`module.count2[1].module.count2[0]`) + if !want.Equal(got) { + t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) + } + }) + }) + t.Run("module count2 resource count2 resource count2", func(t *testing.T) { + got := ex.ExpandModuleResource( + mustModuleAddr(`count2.count2`), + count2ResourceAddr, + ) + want := []addrs.AbsResourceInstance{ + mustAbsResourceInstanceAddr(`module.count2[0].module.count2[0].test.count2[0]`), + mustAbsResourceInstanceAddr(`module.count2[0].module.count2[0].test.count2[1]`), + mustAbsResourceInstanceAddr(`module.count2[0].module.count2[1].test.count2[0]`), + mustAbsResourceInstanceAddr(`module.count2[0].module.count2[1].test.count2[1]`), + mustAbsResourceInstanceAddr(`module.count2[1].module.count2[0].test.count2[0]`), + mustAbsResourceInstanceAddr(`module.count2[1].module.count2[0].test.count2[1]`), + mustAbsResourceInstanceAddr(`module.count2[1].module.count2[1].test.count2[0]`), + 
mustAbsResourceInstanceAddr(`module.count2[1].module.count2[1].test.count2[1]`), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("module count2 resource count2 resource count2", func(t *testing.T) { + got := ex.ExpandResource( + count2ResourceAddr.Absolute(mustModuleInstanceAddr(`module.count2[0].module.count2[1]`)), + ) + want := []addrs.AbsResourceInstance{ + mustAbsResourceInstanceAddr(`module.count2[0].module.count2[1].test.count2[0]`), + mustAbsResourceInstanceAddr(`module.count2[0].module.count2[1].test.count2[1]`), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("module count0", func(t *testing.T) { + got := ex.ExpandModule(mustModuleAddr(`count0`)) + want := []addrs.ModuleInstance(nil) + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("module count0 resource single", func(t *testing.T) { + got := ex.ExpandModuleResource( + mustModuleAddr(`count0`), + singleResourceAddr, + ) + // The containing module has zero instances, so therefore there + // are zero instances of this resource even though it doesn't have + // count = 0 set itself. 
+ want := []addrs.AbsResourceInstance(nil) + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("module for_each", func(t *testing.T) { + got := ex.ExpandModule(mustModuleAddr(`for_each`)) + want := []addrs.ModuleInstance{ + mustModuleInstanceAddr(`module.for_each["a"]`), + mustModuleInstanceAddr(`module.for_each["b"]`), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("module for_each resource single", func(t *testing.T) { + got := ex.ExpandModuleResource( + mustModuleAddr(`for_each`), + singleResourceAddr, + ) + want := []addrs.AbsResourceInstance{ + mustAbsResourceInstanceAddr(`module.for_each["a"].test.single`), + mustAbsResourceInstanceAddr(`module.for_each["b"].test.single`), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("module for_each resource count2", func(t *testing.T) { + got := ex.ExpandModuleResource( + mustModuleAddr(`for_each`), + count2ResourceAddr, + ) + want := []addrs.AbsResourceInstance{ + mustAbsResourceInstanceAddr(`module.for_each["a"].test.count2[0]`), + mustAbsResourceInstanceAddr(`module.for_each["a"].test.count2[1]`), + mustAbsResourceInstanceAddr(`module.for_each["b"].test.count2[0]`), + mustAbsResourceInstanceAddr(`module.for_each["b"].test.count2[1]`), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("module for_each resource count2", func(t *testing.T) { + got := ex.ExpandResource( + count2ResourceAddr.Absolute(mustModuleInstanceAddr(`module.for_each["a"]`)), + ) + want := []addrs.AbsResourceInstance{ + mustAbsResourceInstanceAddr(`module.for_each["a"].test.count2[0]`), + mustAbsResourceInstanceAddr(`module.for_each["a"].test.count2[1]`), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + + t.Run(`module.for_each["b"] repetitiondata`, func(t *testing.T) { + got := 
ex.GetModuleInstanceRepetitionData( + mustModuleInstanceAddr(`module.for_each["b"]`), + ) + want := RepetitionData{ + EachKey: cty.StringVal("b"), + EachValue: cty.NumberIntVal(2), + } + if diff := cmp.Diff(want, got, cmp.Comparer(valueEquals)); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run(`module.count2[0].module.count2[1] repetitiondata`, func(t *testing.T) { + got := ex.GetModuleInstanceRepetitionData( + mustModuleInstanceAddr(`module.count2[0].module.count2[1]`), + ) + want := RepetitionData{ + CountIndex: cty.NumberIntVal(1), + } + if diff := cmp.Diff(want, got, cmp.Comparer(valueEquals)); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run(`module.for_each["a"] repetitiondata`, func(t *testing.T) { + got := ex.GetModuleInstanceRepetitionData( + mustModuleInstanceAddr(`module.for_each["a"]`), + ) + want := RepetitionData{ + EachKey: cty.StringVal("a"), + EachValue: cty.NumberIntVal(1), + } + if diff := cmp.Diff(want, got, cmp.Comparer(valueEquals)); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + + t.Run(`test.for_each["a"] repetitiondata`, func(t *testing.T) { + got := ex.GetResourceInstanceRepetitionData( + mustAbsResourceInstanceAddr(`test.for_each["a"]`), + ) + want := RepetitionData{ + EachKey: cty.StringVal("a"), + EachValue: cty.NumberIntVal(1), + } + if diff := cmp.Diff(want, got, cmp.Comparer(valueEquals)); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run(`module.for_each["a"].test.single repetitiondata`, func(t *testing.T) { + got := ex.GetResourceInstanceRepetitionData( + mustAbsResourceInstanceAddr(`module.for_each["a"].test.single`), + ) + want := RepetitionData{} + if diff := cmp.Diff(want, got, cmp.Comparer(valueEquals)); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run(`module.for_each["a"].test.count2[1] repetitiondata`, func(t *testing.T) { + got := ex.GetResourceInstanceRepetitionData( + mustAbsResourceInstanceAddr(`module.for_each["a"].test.count2[1]`), + 
) + want := RepetitionData{ + CountIndex: cty.NumberIntVal(1), + } + if diff := cmp.Diff(want, got, cmp.Comparer(valueEquals)); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) +} + +func mustAbsResourceInstanceAddr(str string) addrs.AbsResourceInstance { + addr, diags := addrs.ParseAbsResourceInstanceStr(str) + if diags.HasErrors() { + panic(fmt.Sprintf("invalid absolute resource instance address: %s", diags.Err())) + } + return addr +} + +func mustModuleAddr(str string) addrs.Module { + if len(str) == 0 { + return addrs.RootModule + } + // We don't have a real parser for these because they don't appear in the + // language anywhere, but this interpretation mimics the format we + // produce from the String method on addrs.Module. + parts := strings.Split(str, ".") + return addrs.Module(parts) +} + +func mustModuleInstanceAddr(str string) addrs.ModuleInstance { + if len(str) == 0 { + return addrs.RootModuleInstance + } + addr, diags := addrs.ParseModuleInstanceStr(str) + if diags.HasErrors() { + panic(fmt.Sprintf("invalid module instance address: %s", diags.Err())) + } + return addr +} + +func valueEquals(a, b cty.Value) bool { + if a == cty.NilVal || b == cty.NilVal { + return a == b + } + return a.RawEquals(b) +} diff --git a/internal/terraform/instances/expansion_mode.go b/internal/terraform/instances/expansion_mode.go new file mode 100644 index 00000000..82b71f6b --- /dev/null +++ b/internal/terraform/instances/expansion_mode.go @@ -0,0 +1,85 @@ +package instances + +import ( + "fmt" + "sort" + + "github.com/zclconf/go-cty/cty" + + "github.com/camptocamp/terraboard/internal/terraform/addrs" +) + +// expansion is an internal interface used to represent the different +// ways expansion can operate depending on how repetition is configured for +// an object. 
+type expansion interface { + instanceKeys() []addrs.InstanceKey + repetitionData(addrs.InstanceKey) RepetitionData +} + +// expansionSingle is the expansion corresponding to no repetition arguments +// at all, producing a single object with no key. +// +// expansionSingleVal is the only valid value of this type. +type expansionSingle uintptr + +var singleKeys = []addrs.InstanceKey{addrs.NoKey} +var expansionSingleVal expansionSingle + +func (e expansionSingle) instanceKeys() []addrs.InstanceKey { + return singleKeys +} + +func (e expansionSingle) repetitionData(key addrs.InstanceKey) RepetitionData { + if key != addrs.NoKey { + panic("cannot use instance key with non-repeating object") + } + return RepetitionData{} +} + +// expansionCount is the expansion corresponding to the "count" argument. +type expansionCount int + +func (e expansionCount) instanceKeys() []addrs.InstanceKey { + ret := make([]addrs.InstanceKey, int(e)) + for i := range ret { + ret[i] = addrs.IntKey(i) + } + return ret +} + +func (e expansionCount) repetitionData(key addrs.InstanceKey) RepetitionData { + i := int(key.(addrs.IntKey)) + if i < 0 || i >= int(e) { + panic(fmt.Sprintf("instance key %d out of range for count %d", i, e)) + } + return RepetitionData{ + CountIndex: cty.NumberIntVal(int64(i)), + } +} + +// expansionForEach is the expansion corresponding to the "for_each" argument. 
+type expansionForEach map[string]cty.Value + +func (e expansionForEach) instanceKeys() []addrs.InstanceKey { + ret := make([]addrs.InstanceKey, 0, len(e)) + for k := range e { + ret = append(ret, addrs.StringKey(k)) + } + sort.Slice(ret, func(i, j int) bool { + return ret[i].(addrs.StringKey) < ret[j].(addrs.StringKey) + }) + return ret +} + +func (e expansionForEach) repetitionData(key addrs.InstanceKey) RepetitionData { + k := string(key.(addrs.StringKey)) + v, ok := e[k] + if !ok { + panic(fmt.Sprintf("instance key %q does not match any instance", k)) + } + return RepetitionData{ + EachKey: cty.StringVal(k), + EachValue: v, + } +} diff --git a/internal/terraform/instances/instance_key_data.go b/internal/terraform/instances/instance_key_data.go new file mode 100644 index 00000000..9ada5253 --- /dev/null +++ b/internal/terraform/instances/instance_key_data.go @@ -0,0 +1,28 @@ +package instances + +import ( + "github.com/zclconf/go-cty/cty" +) + +// RepetitionData represents the values available to identify individual +// repetitions of a particular object. +// +// This corresponds to the each.key, each.value, and count.index symbols in +// the configuration language. +type RepetitionData struct { + // CountIndex is the value for count.index, or cty.NilVal if evaluating + // in a context where the "count" argument is not active. + // + // For correct operation, this should always be of type cty.Number if not + // nil. + CountIndex cty.Value + + // EachKey and EachValue are the values for each.key and each.value + // respectively, or cty.NilVal if evaluating in a context where the + // "for_each" argument is not active. These must either both be set + // or neither set. + // + // For correct operation, EachKey must always be either of type cty.String + // or cty.Number if not nil. 
+ EachKey, EachValue cty.Value +} diff --git a/internal/terraform/instances/set.go b/internal/terraform/instances/set.go new file mode 100644 index 00000000..ecdec739 --- /dev/null +++ b/internal/terraform/instances/set.go @@ -0,0 +1,51 @@ +package instances + +import ( + "github.com/camptocamp/terraboard/internal/terraform/addrs" +) + +// Set is a set of instances, intended mainly for the return value of +// Expander.AllInstances, where it therefore represents all of the module +// and resource instances known to the expander. +type Set struct { + // Set currently really just wraps Expander with a reduced API that + // only supports lookups, to make it clear that a holder of a Set should + // not be modifying the expander any further. + exp *Expander +} + +// HasModuleInstance returns true if and only if the set contains the module +// instance with the given address. +func (s Set) HasModuleInstance(want addrs.ModuleInstance) bool { + return s.exp.knowsModuleInstance(want) +} + +// HasModuleCall returns true if and only if the set contains the module +// call with the given address, even if that module call has no instances. +func (s Set) HasModuleCall(want addrs.AbsModuleCall) bool { + return s.exp.knowsModuleCall(want) +} + +// HasResourceInstance returns true if and only if the set contains the resource +// instance with the given address. +// TODO: +func (s Set) HasResourceInstance(want addrs.AbsResourceInstance) bool { + return s.exp.knowsResourceInstance(want) +} + +// HasResource returns true if and only if the set contains the resource with +// the given address, even if that resource has no instances. +// TODO: +func (s Set) HasResource(want addrs.AbsResource) bool { + return s.exp.knowsResource(want) +} + +// InstancesForModule returns all of the module instances that correspond with +// the given static module path. 
+// +// If there are multiple module calls in the path that have repetition enabled +// then the result is the full expansion of all combinations of all of their +// declared instance keys. +func (s Set) InstancesForModule(modAddr addrs.Module) []addrs.ModuleInstance { + return s.exp.expandModule(modAddr, true) +} diff --git a/internal/terraform/instances/set_test.go b/internal/terraform/instances/set_test.go new file mode 100644 index 00000000..e4b9774f --- /dev/null +++ b/internal/terraform/instances/set_test.go @@ -0,0 +1,211 @@ +package instances + +import ( + "testing" + + "github.com/camptocamp/terraboard/internal/terraform/addrs" + "github.com/zclconf/go-cty/cty" +) + +func TestSet(t *testing.T) { + exp := NewExpander() + + // The following constructs the following imaginary module/resource tree: + // - root module + // - test_thing.single: no repetition + // - test_thing.count: count = 1 + // - test_thing.for_each: for_each = { c = "C" } + // - module.single: no repetition + // - test_thing.single: no repetition + // - module.nested_single: no repetition + // - module.zero_count: count = 0 + // - module.count: count = 2 + // - module.nested_for_each: [0] for_each = {}, [1] for_each = { e = "E" } + // - module.for_each: for_each = { a = "A", b = "B" } + // - test_thing.count: ["a"] count = 0, ["b"] count = 1 + exp.SetModuleSingle(addrs.RootModuleInstance, addrs.ModuleCall{Name: "single"}) + exp.SetModuleCount(addrs.RootModuleInstance, addrs.ModuleCall{Name: "count"}, 2) + exp.SetModuleForEach(addrs.RootModuleInstance, addrs.ModuleCall{Name: "for_each"}, map[string]cty.Value{ + "a": cty.StringVal("A"), + "b": cty.StringVal("B"), + }) + exp.SetModuleSingle(addrs.RootModuleInstance.Child("single", addrs.NoKey), addrs.ModuleCall{Name: "nested_single"}) + exp.SetModuleForEach(addrs.RootModuleInstance.Child("count", addrs.IntKey(0)), addrs.ModuleCall{Name: "nested_for_each"}, nil) + exp.SetModuleForEach(addrs.RootModuleInstance.Child("count", addrs.IntKey(1)), 
addrs.ModuleCall{Name: "nested_for_each"}, map[string]cty.Value{ + "e": cty.StringVal("E"), + }) + exp.SetModuleCount( + addrs.RootModuleInstance.Child("single", addrs.NoKey).Child("nested_single", addrs.NoKey), + addrs.ModuleCall{Name: "zero_count"}, + 0, + ) + + rAddr := func(name string) addrs.Resource { + return addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: name, + } + } + exp.SetResourceSingle(addrs.RootModuleInstance, rAddr("single")) + exp.SetResourceCount(addrs.RootModuleInstance, rAddr("count"), 1) + exp.SetResourceForEach(addrs.RootModuleInstance, rAddr("for_each"), map[string]cty.Value{ + "c": cty.StringVal("C"), + }) + exp.SetResourceSingle(addrs.RootModuleInstance.Child("single", addrs.NoKey), rAddr("single")) + exp.SetResourceCount(addrs.RootModuleInstance.Child("for_each", addrs.StringKey("a")), rAddr("count"), 0) + exp.SetResourceCount(addrs.RootModuleInstance.Child("for_each", addrs.StringKey("b")), rAddr("count"), 1) + + set := exp.AllInstances() + + // HasModuleInstance tests + if input := addrs.RootModuleInstance; !set.HasModuleInstance(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("single", addrs.NoKey); !set.HasModuleInstance(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("single", addrs.NoKey).Child("nested_single", addrs.NoKey); !set.HasModuleInstance(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("count", addrs.IntKey(0)); !set.HasModuleInstance(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("count", addrs.IntKey(1)); !set.HasModuleInstance(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("count", addrs.IntKey(1)).Child("nested_for_each", addrs.StringKey("e")); !set.HasModuleInstance(input) { + 
t.Errorf("missing %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("for_each", addrs.StringKey("a")); !set.HasModuleInstance(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("for_each", addrs.StringKey("b")); !set.HasModuleInstance(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("single", addrs.IntKey(0)); set.HasModuleInstance(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("single", addrs.StringKey("a")); set.HasModuleInstance(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("single", addrs.NoKey).Child("nonexist", addrs.NoKey); set.HasModuleInstance(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("count", addrs.NoKey); set.HasModuleInstance(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("count", addrs.IntKey(2)); set.HasModuleInstance(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("count", addrs.StringKey("a")); set.HasModuleInstance(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("count", addrs.IntKey(0)).Child("nested_for_each", addrs.StringKey("e")); set.HasModuleInstance(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("single", addrs.NoKey).Child("nested_single", addrs.NoKey).Child("zero_count", addrs.NoKey); set.HasModuleInstance(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("single", addrs.NoKey).Child("nested_single", addrs.NoKey).Child("zero_count", addrs.IntKey(0)); set.HasModuleInstance(input) { + 
t.Errorf("unexpected %T %s", input, input.String()) + } + + // HasModuleCall tests + if input := addrs.RootModuleInstance.ChildCall("single"); !set.HasModuleCall(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("single", addrs.NoKey).ChildCall("nested_single"); !set.HasModuleCall(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.ChildCall("count"); !set.HasModuleCall(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("count", addrs.IntKey(0)).ChildCall("nested_for_each"); !set.HasModuleCall(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("count", addrs.IntKey(1)).ChildCall("nested_for_each"); !set.HasModuleCall(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.ChildCall("for_each"); !set.HasModuleCall(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("single", addrs.NoKey).Child("nested_single", addrs.NoKey).ChildCall("zero_count"); !set.HasModuleCall(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.ChildCall("nonexist"); set.HasModuleCall(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("single", addrs.NoKey).ChildCall("nonexist"); set.HasModuleCall(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + + // HasResourceInstance tests + if input := rAddr("single").Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance); !set.HasResourceInstance(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := rAddr("count").Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance); !set.HasResourceInstance(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := 
rAddr("for_each").Instance(addrs.StringKey("c")).Absolute(addrs.RootModuleInstance); !set.HasResourceInstance(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := rAddr("single").Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance.Child("single", addrs.NoKey)); !set.HasResourceInstance(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := rAddr("count").Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance.Child("for_each", addrs.StringKey("b"))); !set.HasResourceInstance(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := rAddr("single").Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance); set.HasResourceInstance(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + if input := rAddr("single").Instance(addrs.StringKey("")).Absolute(addrs.RootModuleInstance); set.HasResourceInstance(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + if input := rAddr("count").Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance); set.HasResourceInstance(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + if input := rAddr("count").Instance(addrs.StringKey("")).Absolute(addrs.RootModuleInstance); set.HasResourceInstance(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + if input := rAddr("count").Instance(addrs.IntKey(1)).Absolute(addrs.RootModuleInstance); set.HasResourceInstance(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + if input := rAddr("single").Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance.Child("single", addrs.IntKey(0))); set.HasResourceInstance(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + if input := rAddr("count").Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance.Child("for_each", addrs.StringKey("a"))); set.HasResourceInstance(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + + // HasResource tests + if input := 
rAddr("single").Absolute(addrs.RootModuleInstance); !set.HasResource(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := rAddr("count").Absolute(addrs.RootModuleInstance); !set.HasResource(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := rAddr("for_each").Absolute(addrs.RootModuleInstance); !set.HasResource(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := rAddr("single").Absolute(addrs.RootModuleInstance.Child("single", addrs.NoKey)); !set.HasResource(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := rAddr("count").Absolute(addrs.RootModuleInstance.Child("for_each", addrs.StringKey("a"))); !set.HasResource(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := rAddr("count").Absolute(addrs.RootModuleInstance.Child("for_each", addrs.StringKey("b"))); !set.HasResource(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := rAddr("nonexist").Absolute(addrs.RootModuleInstance); set.HasResource(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + if input := rAddr("count").Absolute(addrs.RootModuleInstance.Child("for_each", addrs.StringKey("nonexist"))); set.HasResource(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + + // ensure we can lookup non-existent addrs in a set without panic + if set.InstancesForModule(addrs.RootModule.Child("missing")) != nil { + t.Error("unexpected instances from missing module") + } +} diff --git a/internal/terraform/ipaddr/LICENSE b/internal/terraform/ipaddr/LICENSE new file mode 100644 index 00000000..74487567 --- /dev/null +++ b/internal/terraform/ipaddr/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/internal/terraform/ipaddr/PATENTS b/internal/terraform/ipaddr/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/internal/terraform/ipaddr/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/internal/terraform/ipaddr/README.md b/internal/terraform/ipaddr/README.md new file mode 100644 index 00000000..f0d54988 --- /dev/null +++ b/internal/terraform/ipaddr/README.md @@ -0,0 +1,34 @@ +# Forked IP address parsing functions + +This directory contains a subset of code from the Go project's `net` package +as of Go 1.16, used under the Go project license which we've included here +in [`LICENSE`](LICENSE) and [`PATENTS`](PATENTS), which are also copied from +the Go project. + +Terraform has its own fork of these functions because Go 1.17 included a +breaking change to reject IPv4 address octets written with leading zeros. 
+
+The Go project rationale for that change was that Go historically interpreted
+leading-zero octets inconsistently with many other implementations, trimming
+off the zeros and still treating the rest as decimal rather than treating the
+octet as octal.
+
+The Go team made the reasonable observation that having a function that
+interprets a non-normalized form in a manner inconsistent with other
+implementations may cause naive validation or policy checks to produce
+incorrect results, and thus it's a potential security concern. For more
+information, see [Go issue #30999](https://golang.org/issue/30999).
+
+After careful consideration, the Terraform team has concluded that Terraform's
+use of these functions as part of the implementation of the `cidrhost`,
+`cidrsubnet`, `cidrsubnets`, and `cidrnetmask` functions has a more limited
+impact than the general availability of these functions in the Go standard
+library, and so we cannot justify making an exception to our Terraform 1.0
+compatibility promises similar to the one the Go team made to their Go 1.0
+compatibility promises.
+
+If you're considering using this package for new functionality _other than_ the
+built-in functions mentioned above, please do so only if consistency with the
+behavior of those functions is important. Otherwise, new features are not
+burdened by the same compatibility constraints and so should typically prefer
+to use the stricter interpretation of the upstream parsing functions.
diff --git a/internal/terraform/ipaddr/doc.go b/internal/terraform/ipaddr/doc.go
new file mode 100644
index 00000000..68d79c3c
--- /dev/null
+++ b/internal/terraform/ipaddr/doc.go
@@ -0,0 +1,6 @@
+// Package ipaddr is a fork of a subset of the Go standard "net" package which
+// retains parsing behaviors from Go 1.16 or earlier.
+//
+// Don't use this for any new code without careful consideration. See the
+// README.md in the package directory for more information.
package ipaddr
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// IP address manipulations
//
// IPv4 addresses are 4 bytes; IPv6 addresses are 16 bytes.
// An IPv4 address can be converted to an IPv6 address by
// adding a canonical prefix (10 zeros, 2 0xFFs).
// This library accepts either size of byte slice but always
// returns 16-byte addresses.

package ipaddr

import (
	stdnet "net"
)

//
// Lean on the standard net lib as much as possible.
//

// Aliases into the standard library, so this fork only has to carry the
// parsing routines whose behavior deliberately differs from Go 1.17+.
type IP = stdnet.IP
type IPNet = stdnet.IPNet
type ParseError = stdnet.ParseError

const IPv4len = stdnet.IPv4len
const IPv6len = stdnet.IPv6len

var CIDRMask = stdnet.CIDRMask
var IPv4 = stdnet.IPv4

// Parse IPv4 address (d.d.d.d).
//
// Unlike net.ParseIP in Go 1.17+, this fork still accepts octets written
// with leading zeros (e.g. "127.010.020.030") and interprets them as
// decimal, matching Go <= 1.16. See README.md for the rationale.
func parseIPv4(s string) IP {
	var p [IPv4len]byte
	for i := 0; i < IPv4len; i++ {
		if len(s) == 0 {
			// Missing octets.
			return nil
		}
		if i > 0 {
			// Every octet after the first must be preceded by a dot.
			if s[0] != '.' {
				return nil
			}
			s = s[1:]
		}
		n, c, ok := dtoi(s)
		if !ok || n > 0xFF {
			return nil
		}
		//
		// NOTE: This correct check was added for go-1.17, but is a
		// backwards-incompatible change for Terraform users, who might have
		// already written modules with leading zeroes.
		//
		//if c > 1 && s[0] == '0' {
		//	// Reject non-zero components with leading zeroes.
		//	return nil
		//}
		s = s[c:]
		p[i] = byte(n)
	}
	if len(s) != 0 {
		// Trailing garbage after the fourth octet.
		return nil
	}
	return IPv4(p[0], p[1], p[2], p[3])
}

// parseIPv6 parses s as a literal IPv6 address described in RFC 4291
// and RFC 5952.
func parseIPv6(s string) (ip IP) {
	ip = make(IP, IPv6len)
	ellipsis := -1 // position of ellipsis in ip

	// Might have leading ellipsis
	if len(s) >= 2 && s[0] == ':' && s[1] == ':' {
		ellipsis = 0
		s = s[2:]
		// Might be only ellipsis
		if len(s) == 0 {
			return ip
		}
	}

	// Loop, parsing hex numbers followed by colon.
	i := 0
	for i < IPv6len {
		// Hex number.
		n, c, ok := xtoi(s)
		if !ok || n > 0xFFFF {
			return nil
		}

		// If followed by dot, might be in trailing IPv4.
		if c < len(s) && s[c] == '.' {
			if ellipsis < 0 && i != IPv6len-IPv4len {
				// Not the right place.
				return nil
			}
			if i+IPv4len > IPv6len {
				// Not enough room.
				return nil
			}
			// Note: this calls the fork's parseIPv4, so an embedded IPv4 tail
			// also accepts leading-zero octets (Go <= 1.16 behavior).
			ip4 := parseIPv4(s)
			if ip4 == nil {
				return nil
			}
			ip[i] = ip4[12]
			ip[i+1] = ip4[13]
			ip[i+2] = ip4[14]
			ip[i+3] = ip4[15]
			s = ""
			i += IPv4len
			break
		}

		// Save this 16-bit chunk.
		ip[i] = byte(n >> 8)
		ip[i+1] = byte(n)
		i += 2

		// Stop at end of string.
		s = s[c:]
		if len(s) == 0 {
			break
		}

		// Otherwise must be followed by colon and more.
		if s[0] != ':' || len(s) == 1 {
			return nil
		}
		s = s[1:]

		// Look for ellipsis.
		if s[0] == ':' {
			if ellipsis >= 0 { // already have one
				return nil
			}
			ellipsis = i
			s = s[1:]
			if len(s) == 0 { // can be at end
				break
			}
		}
	}

	// Must have used entire string.
	if len(s) != 0 {
		return nil
	}

	// If didn't parse enough, expand ellipsis.
	if i < IPv6len {
		if ellipsis < 0 {
			return nil
		}
		n := IPv6len - i
		for j := i - 1; j >= ellipsis; j-- {
			ip[j+n] = ip[j]
		}
		for j := ellipsis + n - 1; j >= ellipsis; j-- {
			ip[j] = 0
		}
	} else if ellipsis >= 0 {
		// Ellipsis must represent at least one 0 group.
		return nil
	}
	return ip
}

// ParseIP parses s as an IP address, returning the result.
// The string s can be in IPv4 dotted decimal ("192.0.2.1"), IPv6
// ("2001:db8::68"), or IPv4-mapped IPv6 ("::ffff:192.0.2.1") form.
// If s is not a valid textual representation of an IP address,
// ParseIP returns nil.
//
// Unlike the Go 1.17+ standard library, this fork still accepts IPv4
// octets with leading zeros, interpreting them as decimal.
func ParseIP(s string) IP {
	for i := 0; i < len(s); i++ {
		switch s[i] {
		case '.':
			return parseIPv4(s)
		case ':':
			return parseIPv6(s)
		}
	}
	return nil
}

// ParseCIDR parses s as a CIDR notation IP address and prefix length,
// like "192.0.2.0/24" or "2001:db8::/32", as defined in
// RFC 4632 and RFC 4291.
//
// It returns the IP address and the network implied by the IP and
// prefix length.
// For example, ParseCIDR("192.0.2.1/24") returns the IP address
// 192.0.2.1 and the network 192.0.2.0/24.
func ParseCIDR(s string) (IP, *IPNet, error) {
	i := indexByteString(s, '/')
	if i < 0 {
		return nil, nil, &ParseError{Type: "CIDR address", Text: s}
	}
	addr, mask := s[:i], s[i+1:]
	iplen := IPv4len
	ip := parseIPv4(addr)
	if ip == nil {
		iplen = IPv6len
		ip = parseIPv6(addr)
	}
	// The prefix length must be a whole decimal number of bits in range for
	// the detected address family. dtoi never yields a negative n, so the
	// n < 0 check is purely defensive.
	n, i, ok := dtoi(mask)
	if ip == nil || !ok || i != len(mask) || n < 0 || n > 8*iplen {
		return nil, nil, &ParseError{Type: "CIDR address", Text: s}
	}
	m := CIDRMask(n, 8*iplen)
	return ip, &IPNet{IP: ip.Mask(m), Mask: m}, nil
}

// This is copied from go/src/internal/bytealg, which includes versions
// optimized for various platforms. Those optimizations are elided here so we
// don't have to maintain them.
//
// indexByteString returns the index of the first occurrence of c in s,
// or -1 if c is not present.
func indexByteString(s string, c byte) int {
	for i := 0; i < len(s); i++ {
		if s[i] == c {
			return i
		}
	}
	return -1
}

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ipaddr

import (
	stdnet "net"
	"reflect"
	"testing"
)

//
// Lean on the standard net lib as much as possible.
+// +type IPMask = stdnet.IPMask + +var IPv4Mask = stdnet.IPv4Mask + +var parseIPTests = []struct { + in string + out IP +}{ + {"127.0.1.2", IPv4(127, 0, 1, 2)}, + {"127.0.0.1", IPv4(127, 0, 0, 1)}, + {"127.001.002.003", IPv4(127, 1, 2, 3)}, + {"127.007.008.009", IPv4(127, 7, 8, 9)}, + {"127.010.020.030", IPv4(127, 10, 20, 30)}, + {"::ffff:127.1.2.3", IPv4(127, 1, 2, 3)}, + {"::ffff:127.001.002.003", IPv4(127, 1, 2, 3)}, + {"::ffff:127.007.008.009", IPv4(127, 7, 8, 9)}, + {"::ffff:127.010.020.030", IPv4(127, 10, 20, 30)}, + {"::ffff:7f01:0203", IPv4(127, 1, 2, 3)}, + {"0:0:0:0:0000:ffff:127.1.2.3", IPv4(127, 1, 2, 3)}, + {"0:0:0:0:000000:ffff:127.1.2.3", IPv4(127, 1, 2, 3)}, + {"0:0:0:0::ffff:127.1.2.3", IPv4(127, 1, 2, 3)}, + + {"2001:4860:0:2001::68", IP{0x20, 0x01, 0x48, 0x60, 0, 0, 0x20, 0x01, 0, 0, 0, 0, 0, 0, 0x00, 0x68}}, + {"2001:4860:0000:2001:0000:0000:0000:0068", IP{0x20, 0x01, 0x48, 0x60, 0, 0, 0x20, 0x01, 0, 0, 0, 0, 0, 0, 0x00, 0x68}}, + + {"-0.0.0.0", nil}, + {"0.-1.0.0", nil}, + {"0.0.-2.0", nil}, + {"0.0.0.-3", nil}, + {"127.0.0.256", nil}, + {"abc", nil}, + {"123:", nil}, + {"fe80::1%lo0", nil}, + {"fe80::1%911", nil}, + {"", nil}, + {"a1:a2:a3:a4::b1:b2:b3:b4", nil}, // Issue 6628 + // + // NOTE: These correct failures were added for go-1.17, but are a + // backwards-incompatible change for Terraform users, who might have + // already written modules using leading zeroes. 
+ // + //{"127.001.002.003", nil}, + //{"::ffff:127.001.002.003", nil}, + //{"123.000.000.000", nil}, + //{"1.2..4", nil}, + //{"0123.0.0.1", nil}, +} + +func TestParseIP(t *testing.T) { + for _, tt := range parseIPTests { + if out := ParseIP(tt.in); !reflect.DeepEqual(out, tt.out) { + t.Errorf("ParseIP(%q) = %v, want %v", tt.in, out, tt.out) + } + } +} + +var parseCIDRTests = []struct { + in string + ip IP + net *IPNet + err error +}{ + {"135.104.0.0/32", IPv4(135, 104, 0, 0), &IPNet{IP: IPv4(135, 104, 0, 0), Mask: IPv4Mask(255, 255, 255, 255)}, nil}, + {"0.0.0.0/24", IPv4(0, 0, 0, 0), &IPNet{IP: IPv4(0, 0, 0, 0), Mask: IPv4Mask(255, 255, 255, 0)}, nil}, + {"135.104.0.0/24", IPv4(135, 104, 0, 0), &IPNet{IP: IPv4(135, 104, 0, 0), Mask: IPv4Mask(255, 255, 255, 0)}, nil}, + {"135.104.0.1/32", IPv4(135, 104, 0, 1), &IPNet{IP: IPv4(135, 104, 0, 1), Mask: IPv4Mask(255, 255, 255, 255)}, nil}, + {"135.104.0.1/24", IPv4(135, 104, 0, 1), &IPNet{IP: IPv4(135, 104, 0, 0), Mask: IPv4Mask(255, 255, 255, 0)}, nil}, + {"127.000.000.001/32", IPv4(127, 0, 0, 1), &IPNet{IP: IPv4(127, 0, 0, 1), Mask: IPv4Mask(255, 255, 255, 255)}, nil}, + {"127.007.008.009/32", IPv4(127, 7, 8, 9), &IPNet{IP: IPv4(127, 7, 8, 9), Mask: IPv4Mask(255, 255, 255, 255)}, nil}, + {"127.010.020.030/32", IPv4(127, 10, 20, 30), &IPNet{IP: IPv4(127, 10, 20, 30), Mask: IPv4Mask(255, 255, 255, 255)}, nil}, + {"::1/128", ParseIP("::1"), &IPNet{IP: ParseIP("::1"), Mask: IPMask(ParseIP("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"))}, nil}, + {"abcd:2345::/127", ParseIP("abcd:2345::"), &IPNet{IP: ParseIP("abcd:2345::"), Mask: IPMask(ParseIP("ffff:ffff:ffff:ffff:ffff:ffff:ffff:fffe"))}, nil}, + {"abcd:2345::/65", ParseIP("abcd:2345::"), &IPNet{IP: ParseIP("abcd:2345::"), Mask: IPMask(ParseIP("ffff:ffff:ffff:ffff:8000::"))}, nil}, + {"abcd:2345::/64", ParseIP("abcd:2345::"), &IPNet{IP: ParseIP("abcd:2345::"), Mask: IPMask(ParseIP("ffff:ffff:ffff:ffff::"))}, nil}, + {"abcd:2345::/63", ParseIP("abcd:2345::"), &IPNet{IP: 
ParseIP("abcd:2345::"), Mask: IPMask(ParseIP("ffff:ffff:ffff:fffe::"))}, nil},
	{"abcd:2345::/33", ParseIP("abcd:2345::"), &IPNet{IP: ParseIP("abcd:2345::"), Mask: IPMask(ParseIP("ffff:ffff:8000::"))}, nil},
	{"abcd:2345::/32", ParseIP("abcd:2345::"), &IPNet{IP: ParseIP("abcd:2345::"), Mask: IPMask(ParseIP("ffff:ffff::"))}, nil},
	{"abcd:2344::/31", ParseIP("abcd:2344::"), &IPNet{IP: ParseIP("abcd:2344::"), Mask: IPMask(ParseIP("ffff:fffe::"))}, nil},
	{"abcd:2300::/24", ParseIP("abcd:2300::"), &IPNet{IP: ParseIP("abcd:2300::"), Mask: IPMask(ParseIP("ffff:ff00::"))}, nil},
	{"abcd:2345::/24", ParseIP("abcd:2345::"), &IPNet{IP: ParseIP("abcd:2300::"), Mask: IPMask(ParseIP("ffff:ff00::"))}, nil},
	{"2001:DB8::/48", ParseIP("2001:DB8::"), &IPNet{IP: ParseIP("2001:DB8::"), Mask: IPMask(ParseIP("ffff:ffff:ffff::"))}, nil},
	{"2001:DB8::1/48", ParseIP("2001:DB8::1"), &IPNet{IP: ParseIP("2001:DB8::"), Mask: IPMask(ParseIP("ffff:ffff:ffff::"))}, nil},
	{"192.168.1.1/255.255.255.0", nil, nil, &ParseError{Type: "CIDR address", Text: "192.168.1.1/255.255.255.0"}},
	{"192.168.1.1/35", nil, nil, &ParseError{Type: "CIDR address", Text: "192.168.1.1/35"}},
	{"2001:db8::1/-1", nil, nil, &ParseError{Type: "CIDR address", Text: "2001:db8::1/-1"}},
	{"2001:db8::1/-0", nil, nil, &ParseError{Type: "CIDR address", Text: "2001:db8::1/-0"}},
	{"-0.0.0.0/32", nil, nil, &ParseError{Type: "CIDR address", Text: "-0.0.0.0/32"}},
	{"0.-1.0.0/32", nil, nil, &ParseError{Type: "CIDR address", Text: "0.-1.0.0/32"}},
	{"0.0.-2.0/32", nil, nil, &ParseError{Type: "CIDR address", Text: "0.0.-2.0/32"}},
	{"0.0.0.-3/32", nil, nil, &ParseError{Type: "CIDR address", Text: "0.0.0.-3/32"}},
	{"0.0.0.0/-0", nil, nil, &ParseError{Type: "CIDR address", Text: "0.0.0.0/-0"}},
	//
	// NOTE: This correct failure was added for go-1.17, but is a
	// backwards-incompatible change for Terraform users, who might have
	// already written modules using leading zeroes.
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Simple file i/o and string manipulation, to avoid
// depending on strconv and bufio and strings.

// Bigger than we need, not too big to worry about overflow
const big = 0xFFFFFF

// dtoi reads the run of leading decimal digits of s.
// It returns the parsed value n, the count i of bytes consumed, and ok
// reporting success. An empty digit run yields (0, 0, false); a value
// reaching big saturates at big and reports failure, matching the
// Go <= 1.16 net package. Leading zeros are accepted and interpreted as
// decimal (deliberate — see README.md).
func dtoi(s string) (n int, i int, ok bool) {
	for i < len(s) {
		d := s[i]
		if d < '0' || d > '9' {
			// First non-digit byte ends the number.
			break
		}
		n = n*10 + int(d-'0')
		if n >= big {
			// i is the index of the digit that pushed the value over the
			// limit, matching the upstream implementation's report.
			return big, i, false
		}
		i++
	}
	if i == 0 {
		return 0, 0, false
	}
	return n, i, true
}
// xtoi reads the run of leading hexadecimal digits of s (either case).
// Returns number, characters consumed, success.
func xtoi(s string) (n int, i int, ok bool) {
	n = 0
	for i = 0; i < len(s); i++ {
		if '0' <= s[i] && s[i] <= '9' {
			n *= 16
			n += int(s[i] - '0')
		} else if 'a' <= s[i] && s[i] <= 'f' {
			n *= 16
			n += int(s[i]-'a') + 10
		} else if 'A' <= s[i] && s[i] <= 'F' {
			n *= 16
			n += int(s[i]-'A') + 10
		} else {
			// First non-hex byte ends the number.
			break
		}
		if n >= big {
			// Implausibly large value: report failure rather than overflow.
			return 0, i, false
		}
	}
	if i == 0 {
		return 0, i, false
	}
	return n, i, true
}

// Package blocktoattr includes some helper functions that can perform
// preprocessing on a HCL body where a configschema.Block schema is available
// in order to allow list and set attributes defined in the schema to be
// optionally written by the user as block syntax.
package blocktoattr

package blocktoattr

import (
	"log"

	"github.com/camptocamp/terraboard/internal/terraform/configs/configschema"
	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hcldec"
	"github.com/zclconf/go-cty/cty"
)

// FixUpBlockAttrs takes a raw HCL body and adds some additional normalization
// functionality to allow attributes that are specified as having list or set
// type in the schema to be written with HCL block syntax as multiple nested
// blocks with the attribute name as the block type.
//
// The fixup is only applied in the absence of structural attribute types. The
// presence of these types indicate the use of a provider which does not
// support mapping blocks to attributes.
//
// This partially restores some of the block/attribute confusion from HCL 1
// so that existing patterns that depended on that confusion can continue to
// be used in the short term while we settle on a longer-term strategy.
//
// Most of the fixup work is actually done when the returned body is
// subsequently decoded, so while FixUpBlockAttrs always succeeds, the eventual
// decode of the body might not, if the content of the body is so ambiguous
// that there's no safe way to map it to the schema.
func FixUpBlockAttrs(body hcl.Body, schema *configschema.Block) hcl.Body {
	// The schema should never be nil, but in practice it seems to be sometimes
	// in the presence of poorly-configured test mocks, so we'll be robust
	// by synthesizing an empty one.
	if schema == nil {
		schema = &configschema.Block{}
	}

	if skipFixup(schema) {
		// we don't have any context for the resource name or type, but
		// hopefully this could help locate the evaluation in the logs if there
		// were a problem
		log.Println("[DEBUG] skipping FixUpBlockAttrs")
		return body
	}

	return &fixupBody{
		original: body,
		schema:   schema,
		names:    ambiguousNames(schema),
	}
}

// skipFixup detects any use of Attribute.NestedType, or Types which could not
// be generated by the legacy SDK when taking SchemaConfigModeAttr into account.
func skipFixup(schema *configschema.Block) bool {
	for _, attr := range schema.Attributes {
		if attr.NestedType != nil {
			return true
		}
		ty := attr.Type

		// Lists and sets of objects could be generated by
		// SchemaConfigModeAttr, but some other combinations can be ruled out.

		// Tuples and objects could not be generated at all.
		if ty.IsTupleType() || ty.IsObjectType() {
			return true
		}

		// A map of objects was not possible.
		if ty.IsMapType() && ty.ElementType().IsObjectType() {
			return true
		}

		// Nested collections were not really supported, but could be generated
		// with string types (though we conservatively limit this to primitive types)
		if ty.IsCollectionType() {
			ety := ty.ElementType()
			if ety.IsCollectionType() && !ety.ElementType().IsPrimitiveType() {
				return true
			}
		}
	}

	// Recurse into nested block types: any disqualifying attribute anywhere
	// in the schema tree disables the fixup for the whole body.
	for _, block := range schema.BlockTypes {
		if skipFixup(&block.Block) {
			return true
		}
	}

	return false
}

// fixupBody is an hcl.Body wrapper that performs the block-to-attribute
// normalization lazily, as the body is decoded.
type fixupBody struct {
	original hcl.Body            // the body being wrapped
	schema   *configschema.Block // schema the fixup was configured with
	names    map[string]struct{} // attribute names that may appear as blocks
}

// unknownBlock is implemented by body types that can report whether their
// content is wholly unknown (e.g. from a dynamic block expansion).
type unknownBlock interface {
	Unknown() bool
}

// Unknown delegates to the wrapped body when it supports unknown-ness
// reporting, and otherwise reports false.
func (b *fixupBody) Unknown() bool {
	if u, ok := b.original.(unknownBlock); ok {
		return u.Unknown()
	}
	return false
}

// Content decodes content from the body. The given schema must be the lower-level
// representation of the same schema that was previously passed to FixUpBlockAttrs,
// or else the result is undefined.
func (b *fixupBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
	schema = b.effectiveSchema(schema)
	content, diags := b.original.Content(schema)
	return b.fixupContent(content), diags
}

// PartialContent is like Content but also returns a "remain" body for leftover
// content; the remainder is wrapped so the fixup continues to apply to it.
func (b *fixupBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
	schema = b.effectiveSchema(schema)
	content, remain, diags := b.original.PartialContent(schema)
	remain = &fixupBody{
		original: remain,
		schema:   b.schema,
		names:    b.names,
	}
	return b.fixupContent(content), remain, diags
}

func (b *fixupBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
	// FixUpBlockAttrs is not intended to be used in situations where we'd use
	// JustAttributes, so we just pass this through verbatim to complete our
	// implementation of hcl.Body.
	return b.original.JustAttributes()
}

func (b *fixupBody) MissingItemRange() hcl.Range {
	return b.original.MissingItemRange()
}

// effectiveSchema produces a derived *hcl.BodySchema by sniffing the body's
// content to determine whether the author has used attribute or block syntax
// for each of the ambiguous attributes where both are permitted.
//
// The resulting schema will always contain all of the same names that are
// in the given schema, but some attribute schemas may instead be replaced by
// block header schemas.
func (b *fixupBody) effectiveSchema(given *hcl.BodySchema) *hcl.BodySchema {
	return effectiveSchema(given, b.original, b.names, true)
}

// fixupContent rewrites decoded body content so that blocks matching the
// ambiguous names become synthetic list-of-objects attributes, and all other
// nested blocks are re-wrapped so the fixup applies recursively.
func (b *fixupBody) fixupContent(content *hcl.BodyContent) *hcl.BodyContent {
	var ret hcl.BodyContent
	ret.Attributes = make(hcl.Attributes)
	for name, attr := range content.Attributes {
		ret.Attributes[name] = attr
	}
	blockAttrVals := make(map[string][]*hcl.Block)
	for _, block := range content.Blocks {
		if _, exists := b.names[block.Type]; exists {
			// If we get here then we've found a block type whose instances need
			// to be re-interpreted as a list-of-objects attribute. We'll gather
			// those up and fix them up below.
			blockAttrVals[block.Type] = append(blockAttrVals[block.Type], block)
			continue
		}

		// We need to now re-wrap our inner body so it will be subject to the
		// same attribute-as-block fixup when recursively decoded.
		retBlock := *block // shallow copy
		if blockS, ok := b.schema.BlockTypes[block.Type]; ok {
			// Would be weird if not ok, but we'll allow it for robustness; body just won't be fixed up, then
			retBlock.Body = FixUpBlockAttrs(retBlock.Body, &blockS.Block)
		}

		ret.Blocks = append(ret.Blocks, &retBlock)
	}
	// Now we'll install synthetic attributes for each of our fixups. We can't
	// do this exactly because HCL's information model expects an attribute
	// to be a single decl but we have multiple separate blocks. We'll
	// approximate things, then, by using only our first block for the source
	// location information. (We are guaranteed at least one by the above logic.)
	for name, blocks := range blockAttrVals {
		ret.Attributes[name] = &hcl.Attribute{
			Name: name,
			Expr: &fixupBlocksExpr{
				blocks: blocks,
				ety:    b.schema.Attributes[name].Type.ElementType(),
			},

			Range:     blocks[0].DefRange,
			NameRange: blocks[0].TypeRange,
		}
	}

	ret.MissingItemRange = b.MissingItemRange()
	return &ret
}

// fixupBlocksExpr is an hcl.Expression that evaluates a set of gathered
// blocks as a single list-of-objects value of element type ety.
type fixupBlocksExpr struct {
	blocks hcl.Blocks
	ety    cty.Type
}

func (e *fixupBlocksExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
	// In order to produce a suitable value for our expression we need to
	// now decode the whole descendent block structure under each of our block
	// bodies.
	//
	// That requires us to do something rather strange: we must construct a
	// synthetic block type schema derived from the element type of the
	// attribute, thus inverting our usual direction of lowering a schema
	// into an implied type. Because a type is less detailed than a schema,
	// the result is imprecise and in particular will just consider all
	// the attributes to be optional and let the provider eventually decide
	// whether to return errors if they turn out to be null when required.
	schema := SchemaForCtyElementType(e.ety) // this schema's ImpliedType will match e.ety
	spec := schema.DecoderSpec()

	vals := make([]cty.Value, len(e.blocks))
	var diags hcl.Diagnostics
	for i, block := range e.blocks {
		body := FixUpBlockAttrs(block.Body, schema)
		val, blockDiags := hcldec.Decode(body, spec, ctx)
		diags = append(diags, blockDiags...)
		if val == cty.NilVal {
			// Decode failed outright; substitute an unknown so callers still
			// get a value of the expected type alongside the diagnostics.
			val = cty.UnknownVal(e.ety)
		}
		vals[i] = val
	}
	if len(vals) == 0 {
		return cty.ListValEmpty(e.ety), diags
	}
	return cty.ListVal(vals), diags
}

// Variables reports every traversal referenced anywhere inside the gathered
// blocks, using the same synthetic schema as Value.
func (e *fixupBlocksExpr) Variables() []hcl.Traversal {
	var ret []hcl.Traversal
	schema := SchemaForCtyElementType(e.ety)
	spec := schema.DecoderSpec()
	for _, block := range e.blocks {
		ret = append(ret, hcldec.Variables(block.Body, spec)...)
	}
	return ret
}

func (e *fixupBlocksExpr) Range() hcl.Range {
	// This is not really an appropriate range for the expression but it's
	// the best we can do from here.
	return e.blocks[0].DefRange
}

func (e *fixupBlocksExpr) StartRange() hcl.Range {
	return e.blocks[0].DefRange
}

package blocktoattr

import (
	"testing"

	"github.com/camptocamp/terraboard/internal/terraform/configs/configschema"
	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hcldec"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

// ambiguousNestedBlock builds a recursively nested block schema (fan-out of
// ten per level) used to exercise FixUpBlockAttrs in the benchmark below.
func ambiguousNestedBlock(nesting int) *configschema.NestedBlock {
	ret := &configschema.NestedBlock{
		Nesting: configschema.NestingList,
		Block: configschema.Block{
			Attributes: map[string]*configschema.Attribute{
				"a": {Type: cty.String, Required: true},
				"b": {Type: cty.String, Optional: true},
			},
		},
	}
	if nesting > 0 {
		ret.BlockTypes = map[string]*configschema.NestedBlock{
			"nested0": ambiguousNestedBlock(nesting - 1),
			"nested1": ambiguousNestedBlock(nesting - 1),
			"nested2": ambiguousNestedBlock(nesting - 1),
			"nested3": ambiguousNestedBlock(nesting - 1),
			"nested4": ambiguousNestedBlock(nesting - 1),
			"nested5": ambiguousNestedBlock(nesting - 1),
			"nested6": ambiguousNestedBlock(nesting
- 1), + "nested7": ambiguousNestedBlock(nesting - 1), + "nested8": ambiguousNestedBlock(nesting - 1), + "nested9": ambiguousNestedBlock(nesting - 1), + } + } + return ret +} + +func schemaWithAmbiguousNestedBlock(nesting int) *configschema.Block { + return &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "maybe_block": ambiguousNestedBlock(nesting), + }, + } +} + +const configForFixupBlockAttrsBenchmark = ` +maybe_block { + a = "hello" + b = "world" + nested0 { + a = "the" + nested1 { + a = "deeper" + nested2 { + a = "we" + nested3 { + a = "go" + b = "inside" + } + } + } + } +} +` + +func configBodyForFixupBlockAttrsBenchmark() hcl.Body { + f, diags := hclsyntax.ParseConfig([]byte(configForFixupBlockAttrsBenchmark), "", hcl.Pos{Line: 1, Column: 1}) + if diags.HasErrors() { + panic("test configuration is invalid") + } + return f.Body +} + +func BenchmarkFixUpBlockAttrs(b *testing.B) { + for i := 0; i < b.N; i++ { + b.StopTimer() + body := configBodyForFixupBlockAttrsBenchmark() + schema := schemaWithAmbiguousNestedBlock(5) + b.StartTimer() + + spec := schema.DecoderSpec() + fixedBody := FixUpBlockAttrs(body, schema) + val, diags := hcldec.Decode(fixedBody, spec, nil) + if diags.HasErrors() { + b.Fatal("diagnostics during decoding", diags) + } + if !val.Type().IsObjectType() { + b.Fatal("result is not an object") + } + blockVal := val.GetAttr("maybe_block") + if !blockVal.Type().IsListType() || blockVal.LengthInt() != 1 { + b.Fatal("result has wrong value for 'maybe_block'") + } + } +} diff --git a/internal/terraform/lang/blocktoattr/fixup_test.go b/internal/terraform/lang/blocktoattr/fixup_test.go new file mode 100644 index 00000000..099a739c --- /dev/null +++ b/internal/terraform/lang/blocktoattr/fixup_test.go @@ -0,0 +1,521 @@ +package blocktoattr + +import ( + "testing" + + "github.com/camptocamp/terraboard/internal/terraform/configs/configschema" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/ext/dynblock" + 
"github.com/hashicorp/hcl/v2/hcldec" + "github.com/hashicorp/hcl/v2/hclsyntax" + hcljson "github.com/hashicorp/hcl/v2/json" + "github.com/zclconf/go-cty/cty" +) + +func TestFixUpBlockAttrs(t *testing.T) { + fooSchema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.List(cty.Object(map[string]cty.Type{ + "bar": cty.String, + })), + Optional: true, + }, + }, + } + + tests := map[string]struct { + src string + json bool + schema *configschema.Block + want cty.Value + wantErrs bool + }{ + "empty": { + src: ``, + schema: &configschema.Block{}, + want: cty.EmptyObjectVal, + }, + "empty JSON": { + src: `{}`, + json: true, + schema: &configschema.Block{}, + want: cty.EmptyObjectVal, + }, + "unset": { + src: ``, + schema: fooSchema, + want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(fooSchema.Attributes["foo"].Type), + }), + }, + "unset JSON": { + src: `{}`, + json: true, + schema: fooSchema, + want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(fooSchema.Attributes["foo"].Type), + }), + }, + "no fixup required, with one value": { + src: ` +foo = [ + { + bar = "baz" + }, +] +`, + schema: fooSchema, + want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("baz"), + }), + }), + }), + }, + "no fixup required, with two values": { + src: ` +foo = [ + { + bar = "baz" + }, + { + bar = "boop" + }, +] +`, + schema: fooSchema, + want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("baz"), + }), + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("boop"), + }), + }), + }), + }, + "no fixup required, with values, JSON": { + src: `{"foo": [{"bar": "baz"}]}`, + json: true, + schema: fooSchema, + want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("baz"), + }), 
+ }), + }), + }, + "no fixup required, empty": { + src: ` +foo = [] +`, + schema: fooSchema, + want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListValEmpty(fooSchema.Attributes["foo"].Type.ElementType()), + }), + }, + "no fixup required, empty, JSON": { + src: `{"foo":[]}`, + json: true, + schema: fooSchema, + want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListValEmpty(fooSchema.Attributes["foo"].Type.ElementType()), + }), + }, + "fixup one block": { + src: ` +foo { + bar = "baz" +} +`, + schema: fooSchema, + want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("baz"), + }), + }), + }), + }, + "fixup one block omitting attribute": { + src: ` +foo {} +`, + schema: fooSchema, + want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.NullVal(cty.String), + }), + }), + }), + }, + "fixup two blocks": { + src: ` +foo { + bar = baz +} +foo { + bar = "boop" +} +`, + schema: fooSchema, + want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("baz value"), + }), + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("boop"), + }), + }), + }), + }, + "interaction with dynamic block generation": { + src: ` +dynamic "foo" { + for_each = ["baz", beep] + content { + bar = foo.value + } +} +`, + schema: fooSchema, + want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("baz"), + }), + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("beep value"), + }), + }), + }), + }, + "dynamic block with empty iterator": { + src: ` +dynamic "foo" { + for_each = [] + content { + bar = foo.value + } +} +`, + schema: fooSchema, + want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(fooSchema.Attributes["foo"].Type), + }), + }, + "both 
attribute and block syntax": { + src: ` +foo = [] +foo { + bar = "baz" +} +`, + schema: fooSchema, + wantErrs: true, // Unsupported block type (user must be consistent about whether they consider foo to be a block type or an attribute) + want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("baz"), + }), + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("boop"), + }), + }), + }), + }, + "fixup inside block": { + src: ` +container { + foo { + bar = "baz" + } + foo { + bar = "boop" + } +} +container { + foo { + bar = beep + } +} +`, + schema: &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "container": { + Nesting: configschema.NestingList, + Block: *fooSchema, + }, + }, + }, + want: cty.ObjectVal(map[string]cty.Value{ + "container": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("baz"), + }), + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("boop"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("beep value"), + }), + }), + }), + }), + }), + }, + "fixup inside attribute-as-block": { + src: ` +container { + foo { + bar = "baz" + } + foo { + bar = "boop" + } +} +container { + foo { + bar = beep + } +} +`, + schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "container": { + Type: cty.List(cty.Object(map[string]cty.Type{ + "foo": cty.List(cty.Object(map[string]cty.Type{ + "bar": cty.String, + })), + })), + Optional: true, + }, + }, + }, + want: cty.ObjectVal(map[string]cty.Value{ + "container": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("baz"), + }), + 
cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("boop"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("beep value"), + }), + }), + }), + }), + }), + }, + "nested fixup with dynamic block generation": { + src: ` +container { + dynamic "foo" { + for_each = ["baz", beep] + content { + bar = foo.value + } + } +} +`, + schema: &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "container": { + Nesting: configschema.NestingList, + Block: *fooSchema, + }, + }, + }, + want: cty.ObjectVal(map[string]cty.Value{ + "container": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("baz"), + }), + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("beep value"), + }), + }), + }), + }), + }), + }, + + "missing nested block items": { + src: ` +container { + foo { + bar = "one" + } +} +`, + schema: &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "container": { + Nesting: configschema.NestingList, + MinItems: 2, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.List(cty.Object(map[string]cty.Type{ + "bar": cty.String, + })), + Optional: true, + }, + }, + }, + }, + }, + }, + want: cty.ObjectVal(map[string]cty.Value{ + "container": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("baz"), + }), + }), + }), + }), + }), + wantErrs: true, + }, + "no fixup allowed with NestedType": { + src: ` + container { + foo = "one" + } + `, + schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "container": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingList, + Attributes: map[string]*configschema.Attribute{ + "foo": { 
+ Type: cty.String, + }, + }, + }, + }, + }, + }, + want: cty.ObjectVal(map[string]cty.Value{ + "container": cty.NullVal(cty.List( + cty.Object(map[string]cty.Type{ + "foo": cty.String, + }), + )), + }), + wantErrs: true, + }, + "no fixup allowed new types": { + src: ` + container { + foo = "one" + } + `, + schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + // This could be a ConfigModeAttr fixup + "container": { + Type: cty.List(cty.Object(map[string]cty.Type{ + "foo": cty.String, + })), + }, + // But the presence of this type means it must have been + // declared by a new SDK + "new_type": { + Type: cty.Object(map[string]cty.Type{ + "boo": cty.String, + }), + }, + }, + }, + want: cty.ObjectVal(map[string]cty.Value{ + "container": cty.NullVal(cty.List( + cty.Object(map[string]cty.Type{ + "foo": cty.String, + }), + )), + }), + wantErrs: true, + }, + } + + ctx := &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "bar": cty.StringVal("bar value"), + "baz": cty.StringVal("baz value"), + "beep": cty.StringVal("beep value"), + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + var f *hcl.File + var diags hcl.Diagnostics + if test.json { + f, diags = hcljson.Parse([]byte(test.src), "test.tf.json") + } else { + f, diags = hclsyntax.ParseConfig([]byte(test.src), "test.tf", hcl.Pos{Line: 1, Column: 1}) + } + if diags.HasErrors() { + for _, diag := range diags { + t.Errorf("unexpected diagnostic: %s", diag) + } + t.FailNow() + } + + // We'll expand dynamic blocks in the body first, to mimic how + // we process this fixup when using the main "lang" package API. 
+ spec := test.schema.DecoderSpec() + body := dynblock.Expand(f.Body, ctx) + + body = FixUpBlockAttrs(body, test.schema) + got, diags := hcldec.Decode(body, spec, ctx) + + if test.wantErrs { + if !diags.HasErrors() { + t.Errorf("succeeded, but want error\ngot: %#v", got) + } + + // check that our wrapped body returns the correct context by + // verifying the Subject is valid. + for _, d := range diags { + if d.Subject.Filename == "" { + t.Errorf("empty diagnostic subject: %#v", d.Subject) + } + } + return + } + + if !test.want.RawEquals(got) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.want) + } + for _, diag := range diags { + t.Errorf("unexpected diagnostic: %s", diag) + } + }) + } +} diff --git a/internal/terraform/lang/blocktoattr/schema.go b/internal/terraform/lang/blocktoattr/schema.go new file mode 100644 index 00000000..4ac73cb6 --- /dev/null +++ b/internal/terraform/lang/blocktoattr/schema.go @@ -0,0 +1,146 @@ +package blocktoattr + +import ( + "github.com/camptocamp/terraboard/internal/terraform/configs/configschema" + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" +) + +func ambiguousNames(schema *configschema.Block) map[string]struct{} { + if schema == nil { + return nil + } + ambiguousNames := make(map[string]struct{}) + for name, attrS := range schema.Attributes { + aty := attrS.Type + if (aty.IsListType() || aty.IsSetType()) && aty.ElementType().IsObjectType() { + ambiguousNames[name] = struct{}{} + } + } + return ambiguousNames +} + +func effectiveSchema(given *hcl.BodySchema, body hcl.Body, ambiguousNames map[string]struct{}, dynamicExpanded bool) *hcl.BodySchema { + ret := &hcl.BodySchema{} + + appearsAsBlock := make(map[string]struct{}) + { + // We'll construct some throwaway schemas here just to probe for + // whether each of our ambiguous names seems to be being used as + // an attribute or a block. 
We need to check both because in JSON + // syntax we rely on the schema to decide between attribute or block + // interpretation and so JSON will always answer yes to both of + // these questions and we want to prefer the attribute interpretation + // in that case. + var probeSchema hcl.BodySchema + + for name := range ambiguousNames { + probeSchema = hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: name, + }, + }, + } + content, _, _ := body.PartialContent(&probeSchema) + if _, exists := content.Attributes[name]; exists { + // Can decode as an attribute, so we'll go with that. + continue + } + probeSchema = hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: name, + }, + }, + } + content, _, _ = body.PartialContent(&probeSchema) + if len(content.Blocks) > 0 || dynamicExpanded { + // A dynamic block with an empty iterator returns nothing. + // If there's no attribute and we have either a block or a + // dynamic expansion, we need to rewrite this one as a + // block for a successful result. + appearsAsBlock[name] = struct{}{} + } + } + if !dynamicExpanded { + // If we're deciding for a context where dynamic blocks haven't + // been expanded yet then we need to probe for those too. + probeSchema = hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "dynamic", + LabelNames: []string{"type"}, + }, + }, + } + content, _, _ := body.PartialContent(&probeSchema) + for _, block := range content.Blocks { + if _, exists := ambiguousNames[block.Labels[0]]; exists { + appearsAsBlock[block.Labels[0]] = struct{}{} + } + } + } + } + + for _, attrS := range given.Attributes { + if _, exists := appearsAsBlock[attrS.Name]; exists { + ret.Blocks = append(ret.Blocks, hcl.BlockHeaderSchema{ + Type: attrS.Name, + }) + } else { + ret.Attributes = append(ret.Attributes, attrS) + } + } + + // Anything that is specified as a block type in the input schema remains + // that way by just passing through verbatim. 
+ ret.Blocks = append(ret.Blocks, given.Blocks...) + + return ret +} + +// SchemaForCtyElementType converts a cty object type into an +// approximately-equivalent configschema.Block representing the element of +// a list or set. If the given type is not an object type then this +// function will panic. +func SchemaForCtyElementType(ty cty.Type) *configschema.Block { + atys := ty.AttributeTypes() + ret := &configschema.Block{ + Attributes: make(map[string]*configschema.Attribute, len(atys)), + } + for name, aty := range atys { + ret.Attributes[name] = &configschema.Attribute{ + Type: aty, + Optional: true, + } + } + return ret +} + +// SchemaForCtyContainerType converts a cty list-of-object or set-of-object type +// into an approximately-equivalent configschema.NestedBlock. If the given type +// is not of the expected kind then this function will panic. +func SchemaForCtyContainerType(ty cty.Type) *configschema.NestedBlock { + var nesting configschema.NestingMode + switch { + case ty.IsListType(): + nesting = configschema.NestingList + case ty.IsSetType(): + nesting = configschema.NestingSet + default: + panic("unsuitable type") + } + nested := SchemaForCtyElementType(ty.ElementType()) + return &configschema.NestedBlock{ + Nesting: nesting, + Block: *nested, + } +} + +// TypeCanBeBlocks returns true if the given type is a list-of-object or +// set-of-object type, and would thus be subject to the blocktoattr fixup +// if used as an attribute type. 
+func TypeCanBeBlocks(ty cty.Type) bool { + return (ty.IsListType() || ty.IsSetType()) && ty.ElementType().IsObjectType() +} diff --git a/internal/terraform/lang/blocktoattr/variables.go b/internal/terraform/lang/blocktoattr/variables.go new file mode 100644 index 00000000..8b653cf2 --- /dev/null +++ b/internal/terraform/lang/blocktoattr/variables.go @@ -0,0 +1,45 @@ +package blocktoattr + +import ( + "github.com/camptocamp/terraboard/internal/terraform/configs/configschema" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/ext/dynblock" + "github.com/hashicorp/hcl/v2/hcldec" +) + +// ExpandedVariables finds all of the global variables referenced in the +// given body with the given schema while taking into account the possibilities +// both of "dynamic" blocks being expanded and the possibility of certain +// attributes being written instead as nested blocks as allowed by the +// FixUpBlockAttrs function. +// +// This function exists to allow variables to be analyzed prior to dynamic +// block expansion while also dealing with the fact that dynamic block expansion +// might in turn produce nested blocks that are subject to FixUpBlockAttrs. +// +// This is intended as a drop-in replacement for dynblock.VariablesHCLDec, +// which is itself a drop-in replacement for hcldec.Variables. 
+func ExpandedVariables(body hcl.Body, schema *configschema.Block) []hcl.Traversal { + rootNode := dynblock.WalkVariables(body) + return walkVariables(rootNode, body, schema) +} + +func walkVariables(node dynblock.WalkVariablesNode, body hcl.Body, schema *configschema.Block) []hcl.Traversal { + givenRawSchema := hcldec.ImpliedSchema(schema.DecoderSpec()) + ambiguousNames := ambiguousNames(schema) + effectiveRawSchema := effectiveSchema(givenRawSchema, body, ambiguousNames, false) + vars, children := node.Visit(effectiveRawSchema) + + for _, child := range children { + if blockS, exists := schema.BlockTypes[child.BlockTypeName]; exists { + vars = append(vars, walkVariables(child.Node, child.Body(), &blockS.Block)...) + } else if attrS, exists := schema.Attributes[child.BlockTypeName]; exists && attrS.Type.IsCollectionType() && attrS.Type.ElementType().IsObjectType() { + // ☝️Check for collection type before element type, because if this is a mis-placed reference, + // a panic here will prevent other useful diags from being elevated to show the user what to fix + synthSchema := SchemaForCtyElementType(attrS.Type.ElementType()) + vars = append(vars, walkVariables(child.Node, child.Body(), synthSchema)...) 
+ } + } + + return vars +} diff --git a/internal/terraform/lang/blocktoattr/variables_test.go b/internal/terraform/lang/blocktoattr/variables_test.go new file mode 100644 index 00000000..25550c10 --- /dev/null +++ b/internal/terraform/lang/blocktoattr/variables_test.go @@ -0,0 +1,200 @@ +package blocktoattr + +import ( + "testing" + + "github.com/camptocamp/terraboard/internal/terraform/configs/configschema" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + hcljson "github.com/hashicorp/hcl/v2/json" + "github.com/zclconf/go-cty/cty" +) + +func TestExpandedVariables(t *testing.T) { + fooSchema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.List(cty.Object(map[string]cty.Type{ + "bar": cty.String, + })), + Optional: true, + }, + "bar": { + Type: cty.Map(cty.String), + Optional: true, + }, + }, + } + + tests := map[string]struct { + src string + json bool + schema *configschema.Block + want []hcl.Traversal + }{ + "empty": { + src: ``, + schema: &configschema.Block{}, + want: nil, + }, + "attribute syntax": { + src: ` +foo = [ + { + bar = baz + }, +] +`, + schema: fooSchema, + want: []hcl.Traversal{ + { + hcl.TraverseRoot{ + Name: "baz", + SrcRange: hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 4, Column: 11, Byte: 23}, + End: hcl.Pos{Line: 4, Column: 14, Byte: 26}, + }, + }, + }, + }, + }, + "block syntax": { + src: ` +foo { + bar = baz +} +`, + schema: fooSchema, + want: []hcl.Traversal{ + { + hcl.TraverseRoot{ + Name: "baz", + SrcRange: hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 3, Column: 9, Byte: 15}, + End: hcl.Pos{Line: 3, Column: 12, Byte: 18}, + }, + }, + }, + }, + }, + "block syntax with nested blocks": { + src: ` +foo { + bar { + boop = baz + } +} +`, + schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: 
cty.List(cty.Object(map[string]cty.Type{ + "bar": cty.List(cty.Object(map[string]cty.Type{ + "boop": cty.String, + })), + })), + Optional: true, + }, + }, + }, + want: []hcl.Traversal{ + { + hcl.TraverseRoot{ + Name: "baz", + SrcRange: hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 4, Column: 12, Byte: 26}, + End: hcl.Pos{Line: 4, Column: 15, Byte: 29}, + }, + }, + }, + }, + }, + "dynamic block syntax": { + src: ` +dynamic "foo" { + for_each = beep + content { + bar = baz + } +} +`, + schema: fooSchema, + want: []hcl.Traversal{ + { + hcl.TraverseRoot{ + Name: "beep", + SrcRange: hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 3, Column: 14, Byte: 30}, + End: hcl.Pos{Line: 3, Column: 18, Byte: 34}, + }, + }, + }, + { + hcl.TraverseRoot{ + Name: "baz", + SrcRange: hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 5, Column: 11, Byte: 57}, + End: hcl.Pos{Line: 5, Column: 14, Byte: 60}, + }, + }, + }, + }, + }, + "misplaced dynamic block": { + src: ` +dynamic "bar" { + for_each = beep + content { + key = val + } +} +`, + schema: fooSchema, + want: []hcl.Traversal{ + { + hcl.TraverseRoot{ + Name: "beep", + SrcRange: hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 3, Column: 14, Byte: 30}, + End: hcl.Pos{Line: 3, Column: 18, Byte: 34}, + }, + }, + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + var f *hcl.File + var diags hcl.Diagnostics + if test.json { + f, diags = hcljson.Parse([]byte(test.src), "test.tf.json") + } else { + f, diags = hclsyntax.ParseConfig([]byte(test.src), "test.tf", hcl.Pos{Line: 1, Column: 1}) + } + if diags.HasErrors() { + for _, diag := range diags { + t.Errorf("unexpected diagnostic: %s", diag) + } + t.FailNow() + } + + got := ExpandedVariables(f.Body, test.schema) + + co := cmpopts.IgnoreUnexported(hcl.TraverseRoot{}) + if !cmp.Equal(got, test.want, co) { + t.Errorf("wrong result\n%s", cmp.Diff(test.want, got, co)) + } + }) + } + +} diff --git 
a/internal/terraform/lang/data.go b/internal/terraform/lang/data.go new file mode 100644 index 00000000..44d3d2e5 --- /dev/null +++ b/internal/terraform/lang/data.go @@ -0,0 +1,33 @@ +package lang + +import ( + "github.com/camptocamp/terraboard/internal/terraform/addrs" + "github.com/camptocamp/terraboard/internal/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// Data is an interface whose implementations can provide cty.Value +// representations of objects identified by referenceable addresses from +// the addrs package. +// +// This interface will grow each time a new type of reference is added, and so +// implementations outside of the Terraform codebases are not advised. +// +// Each method returns a suitable value and optionally some diagnostics. If the +// returned diagnostics contains errors then the type of the returned value is +// used to construct an unknown value of the same type which is then used in +// place of the requested object so that type checking can still proceed. In +// cases where it's not possible to even determine a suitable result type, +// cty.DynamicVal is returned along with errors describing the problem. 
+type Data interface { + StaticValidateReferences(refs []*addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics + + GetCountAttr(addrs.CountAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetForEachAttr(addrs.ForEachAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetResource(addrs.Resource, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetLocalValue(addrs.LocalValue, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetModule(addrs.ModuleCall, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetPathAttr(addrs.PathAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetTerraformAttr(addrs.TerraformAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetInputVariable(addrs.InputVariable, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) +} diff --git a/internal/terraform/lang/data_test.go b/internal/terraform/lang/data_test.go new file mode 100644 index 00000000..8be6993f --- /dev/null +++ b/internal/terraform/lang/data_test.go @@ -0,0 +1,62 @@ +package lang + +import ( + "github.com/camptocamp/terraboard/internal/terraform/addrs" + "github.com/camptocamp/terraboard/internal/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +type dataForTests struct { + CountAttrs map[string]cty.Value + ForEachAttrs map[string]cty.Value + Resources map[string]cty.Value + LocalValues map[string]cty.Value + Modules map[string]cty.Value + PathAttrs map[string]cty.Value + TerraformAttrs map[string]cty.Value + InputVariables map[string]cty.Value +} + +var _ Data = &dataForTests{} + +func (d *dataForTests) StaticValidateReferences(refs []*addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics { + return nil // does nothing in this stub implementation +} + +func (d *dataForTests) GetCountAttr(addr addrs.CountAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + return d.CountAttrs[addr.Name], nil +} + +func (d *dataForTests) GetForEachAttr(addr addrs.ForEachAttr, rng 
tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + return d.ForEachAttrs[addr.Name], nil +} + +func (d *dataForTests) GetResource(addr addrs.Resource, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + return d.Resources[addr.String()], nil +} + +func (d *dataForTests) GetInputVariable(addr addrs.InputVariable, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + return d.InputVariables[addr.Name], nil +} + +func (d *dataForTests) GetLocalValue(addr addrs.LocalValue, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + return d.LocalValues[addr.Name], nil +} + +func (d *dataForTests) GetModule(addr addrs.ModuleCall, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + return d.Modules[addr.String()], nil +} + +func (d *dataForTests) GetModuleInstanceOutput(addr addrs.ModuleCallInstanceOutput, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + // This will panic if the module object does not have the requested attribute + obj := d.Modules[addr.Call.String()] + return obj.GetAttr(addr.Name), nil +} + +func (d *dataForTests) GetPathAttr(addr addrs.PathAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + return d.PathAttrs[addr.Name], nil +} + +func (d *dataForTests) GetTerraformAttr(addr addrs.TerraformAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + return d.TerraformAttrs[addr.Name], nil +} diff --git a/internal/terraform/lang/doc.go b/internal/terraform/lang/doc.go new file mode 100644 index 00000000..af5c5cac --- /dev/null +++ b/internal/terraform/lang/doc.go @@ -0,0 +1,5 @@ +// Package lang deals with the runtime aspects of Terraform's configuration +// language, with concerns such as expression evaluation. It is closely related +// to sibling package "configs", which is responsible for configuration +// parsing and static validation. 
+package lang diff --git a/internal/terraform/lang/eval.go b/internal/terraform/lang/eval.go new file mode 100644 index 00000000..33986e15 --- /dev/null +++ b/internal/terraform/lang/eval.go @@ -0,0 +1,452 @@ +package lang + +import ( + "fmt" + + "github.com/camptocamp/terraboard/internal/terraform/addrs" + "github.com/camptocamp/terraboard/internal/terraform/configs/configschema" + "github.com/camptocamp/terraboard/internal/terraform/instances" + "github.com/camptocamp/terraboard/internal/terraform/lang/blocktoattr" + "github.com/camptocamp/terraboard/internal/terraform/tfdiags" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/ext/dynblock" + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// ExpandBlock expands any "dynamic" blocks present in the given body. The +// result is a body with those blocks expanded, ready to be evaluated with +// EvalBlock. +// +// If the returned diagnostics contains errors then the result may be +// incomplete or invalid. +func (s *Scope) ExpandBlock(body hcl.Body, schema *configschema.Block) (hcl.Body, tfdiags.Diagnostics) { + spec := schema.DecoderSpec() + + traversals := dynblock.ExpandVariablesHCLDec(body, spec) + refs, diags := References(traversals) + + ctx, ctxDiags := s.EvalContext(refs) + diags = diags.Append(ctxDiags) + + return dynblock.Expand(body, ctx), diags +} + +// EvalBlock evaluates the given body using the given block schema and returns +// a cty object value representing its contents. The type of the result conforms +// to the implied type of the given schema. +// +// This function does not automatically expand "dynamic" blocks within the +// body. If that is desired, first call the ExpandBlock method to obtain +// an expanded body to pass to this method. +// +// If the returned diagnostics contains errors then the result may be +// incomplete or invalid. 
+func (s *Scope) EvalBlock(body hcl.Body, schema *configschema.Block) (cty.Value, tfdiags.Diagnostics) { + spec := schema.DecoderSpec() + + refs, diags := ReferencesInBlock(body, schema) + + ctx, ctxDiags := s.EvalContext(refs) + diags = diags.Append(ctxDiags) + if diags.HasErrors() { + // We'll stop early if we found problems in the references, because + // it's likely evaluation will produce redundant copies of the same errors. + return cty.UnknownVal(schema.ImpliedType()), diags + } + + // HACK: In order to remain compatible with some assumptions made in + // Terraform v0.11 and earlier about the approximate equivalence of + // attribute vs. block syntax, we do a just-in-time fixup here to allow + // any attribute in the schema that has a list-of-objects or set-of-objects + // kind to potentially be populated instead by one or more nested blocks + // whose type is the attribute name. + body = blocktoattr.FixUpBlockAttrs(body, schema) + + val, evalDiags := hcldec.Decode(body, spec, ctx) + diags = diags.Append(evalDiags) + + return val, diags +} + +// EvalSelfBlock evaluates the given body only within the scope of the provided +// object and instance key data. References to the object must use self, and the +// key data will only contain count.index or each.key. The static values for +// terraform and path will also be available in this context. 
+func (s *Scope) EvalSelfBlock(body hcl.Body, self cty.Value, schema *configschema.Block, keyData instances.RepetitionData) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + spec := schema.DecoderSpec() + + vals := make(map[string]cty.Value) + vals["self"] = self + + if !keyData.CountIndex.IsNull() { + vals["count"] = cty.ObjectVal(map[string]cty.Value{ + "index": keyData.CountIndex, + }) + } + if !keyData.EachKey.IsNull() { + vals["each"] = cty.ObjectVal(map[string]cty.Value{ + "key": keyData.EachKey, + }) + } + + refs, refDiags := References(hcldec.Variables(body, spec)) + diags = diags.Append(refDiags) + + terraformAttrs := map[string]cty.Value{} + pathAttrs := map[string]cty.Value{} + + // We could always load the static values for Path and Terraform values, + // but we want to parse the references so that we can get source ranges for + // user diagnostics. + for _, ref := range refs { + // we already loaded the self value + if ref.Subject == addrs.Self { + continue + } + + switch subj := ref.Subject.(type) { + case addrs.PathAttr: + val, valDiags := normalizeRefValue(s.Data.GetPathAttr(subj, ref.SourceRange)) + diags = diags.Append(valDiags) + pathAttrs[subj.Name] = val + + case addrs.TerraformAttr: + val, valDiags := normalizeRefValue(s.Data.GetTerraformAttr(subj, ref.SourceRange)) + diags = diags.Append(valDiags) + terraformAttrs[subj.Name] = val + + case addrs.CountAttr, addrs.ForEachAttr: + // each and count have already been handled. + + default: + // This should have been caught in validation, but point the user + // to the correct location in case something slipped through. 
+ diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid reference`, + Detail: fmt.Sprintf("The reference to %q is not valid in this context", ref.Subject), + Subject: ref.SourceRange.ToHCL().Ptr(), + }) + } + } + + vals["path"] = cty.ObjectVal(pathAttrs) + vals["terraform"] = cty.ObjectVal(terraformAttrs) + + ctx := &hcl.EvalContext{ + Variables: vals, + Functions: s.Functions(), + } + + val, decDiags := hcldec.Decode(body, schema.DecoderSpec(), ctx) + diags = diags.Append(decDiags) + return val, diags +} + +// EvalExpr evaluates a single expression in the receiving context and returns +// the resulting value. The value will be converted to the given type before +// it is returned if possible, or else an error diagnostic will be produced +// describing the conversion error. +// +// Pass an expected type of cty.DynamicPseudoType to skip automatic conversion +// and just obtain the returned value directly. +// +// If the returned diagnostics contains errors then the result may be +// incomplete, but will always be of the requested type. +func (s *Scope) EvalExpr(expr hcl.Expression, wantType cty.Type) (cty.Value, tfdiags.Diagnostics) { + refs, diags := ReferencesInExpr(expr) + + ctx, ctxDiags := s.EvalContext(refs) + diags = diags.Append(ctxDiags) + if diags.HasErrors() { + // We'll stop early if we found problems in the references, because + // it's likely evaluation will produce redundant copies of the same errors. 
+ return cty.UnknownVal(wantType), diags + } + + val, evalDiags := expr.Value(ctx) + diags = diags.Append(evalDiags) + + if wantType != cty.DynamicPseudoType { + var convErr error + val, convErr = convert.Convert(val, wantType) + if convErr != nil { + val = cty.UnknownVal(wantType) + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect value type", + Detail: fmt.Sprintf("Invalid expression value: %s.", tfdiags.FormatError(convErr)), + Subject: expr.Range().Ptr(), + Expression: expr, + EvalContext: ctx, + }) + } + } + + return val, diags +} + +// EvalReference evaluates the given reference in the receiving scope and +// returns the resulting value. The value will be converted to the given type before +// it is returned if possible, or else an error diagnostic will be produced +// describing the conversion error. +// +// Pass an expected type of cty.DynamicPseudoType to skip automatic conversion +// and just obtain the returned value directly. +// +// If the returned diagnostics contains errors then the result may be +// incomplete, but will always be of the requested type. +func (s *Scope) EvalReference(ref *addrs.Reference, wantType cty.Type) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // We cheat a bit here and just build an EvalContext for our requested + // reference with the "self" address overridden, and then pull the "self" + // result out of it to return. 
+ ctx, ctxDiags := s.evalContext([]*addrs.Reference{ref}, ref.Subject) + diags = diags.Append(ctxDiags) + val := ctx.Variables["self"] + if val == cty.NilVal { + val = cty.DynamicVal + } + + var convErr error + val, convErr = convert.Convert(val, wantType) + if convErr != nil { + val = cty.UnknownVal(wantType) + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect value type", + Detail: fmt.Sprintf("Invalid expression value: %s.", tfdiags.FormatError(convErr)), + Subject: ref.SourceRange.ToHCL().Ptr(), + }) + } + + return val, diags +} + +// EvalContext constructs a HCL expression evaluation context whose variable +// scope contains sufficient values to satisfy the given set of references. +// +// Most callers should prefer to use the evaluation helper methods that +// this type offers, but this is here for less common situations where the +// caller will handle the evaluation calls itself. +func (s *Scope) EvalContext(refs []*addrs.Reference) (*hcl.EvalContext, tfdiags.Diagnostics) { + return s.evalContext(refs, s.SelfAddr) +} + +func (s *Scope) evalContext(refs []*addrs.Reference, selfAddr addrs.Referenceable) (*hcl.EvalContext, tfdiags.Diagnostics) { + if s == nil { + panic("attempt to construct EvalContext for nil Scope") + } + + var diags tfdiags.Diagnostics + vals := make(map[string]cty.Value) + funcs := s.Functions() + ctx := &hcl.EvalContext{ + Variables: vals, + Functions: funcs, + } + + if len(refs) == 0 { + // Easy path for common case where there are no references at all. + return ctx, diags + } + + // First we'll do static validation of the references. This catches things + // early that might otherwise not get caught due to unknown values being + // present in the scope during planning. 
+ if staticDiags := s.Data.StaticValidateReferences(refs, selfAddr); staticDiags.HasErrors() { + diags = diags.Append(staticDiags) + return ctx, diags + } + + // The reference set we are given has not been de-duped, and so there can + // be redundant requests in it for two reasons: + // - The same item is referenced multiple times + // - Both an item and that item's container are separately referenced. + // We will still visit every reference here and ask our data source for + // it, since that allows us to gather a full set of any errors and + // warnings, but once we've gathered all the data we'll then skip anything + // that's redundant in the process of populating our values map. + dataResources := map[string]map[string]cty.Value{} + managedResources := map[string]map[string]cty.Value{} + wholeModules := map[string]cty.Value{} + inputVariables := map[string]cty.Value{} + localValues := map[string]cty.Value{} + pathAttrs := map[string]cty.Value{} + terraformAttrs := map[string]cty.Value{} + countAttrs := map[string]cty.Value{} + forEachAttrs := map[string]cty.Value{} + var self cty.Value + + for _, ref := range refs { + rng := ref.SourceRange + + rawSubj := ref.Subject + if rawSubj == addrs.Self { + if selfAddr == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid "self" reference`, + // This detail message mentions some current practice that + // this codepath doesn't really "know about". If the "self" + // object starts being supported in more contexts later then + // we'll need to adjust this message. + Detail: `The "self" object is not available in this context. This object can be used only in resource provisioner, connection, and postcondition blocks.`, + Subject: ref.SourceRange.ToHCL().Ptr(), + }) + continue + } + + if selfAddr == addrs.Self { + // Programming error: the self address cannot alias itself. 
+ panic("scope SelfAddr attempting to alias itself") + } + + // self can only be used within a resource instance + subj := selfAddr.(addrs.ResourceInstance) + + val, valDiags := normalizeRefValue(s.Data.GetResource(subj.ContainingResource(), rng)) + + diags = diags.Append(valDiags) + + // Self is an exception in that it must always resolve to a + // particular instance. We will still insert the full resource into + // the context below. + var hclDiags hcl.Diagnostics + // We should always have a valid self index by this point, but in + // the case of an error, self may end up as a cty.DynamicValue. + switch k := subj.Key.(type) { + case addrs.IntKey: + self, hclDiags = hcl.Index(val, cty.NumberIntVal(int64(k)), ref.SourceRange.ToHCL().Ptr()) + diags = diags.Append(hclDiags) + case addrs.StringKey: + self, hclDiags = hcl.Index(val, cty.StringVal(string(k)), ref.SourceRange.ToHCL().Ptr()) + diags = diags.Append(hclDiags) + default: + self = val + } + continue + } + + // This type switch must cover all of the "Referenceable" implementations + // in package addrs, however we are removing the possibility of + // Instances beforehand. 
+ switch addr := rawSubj.(type) { + case addrs.ResourceInstance: + rawSubj = addr.ContainingResource() + case addrs.ModuleCallInstance: + rawSubj = addr.Call + case addrs.ModuleCallInstanceOutput: + rawSubj = addr.Call.Call + } + + switch subj := rawSubj.(type) { + case addrs.Resource: + var into map[string]map[string]cty.Value + switch subj.Mode { + case addrs.ManagedResourceMode: + into = managedResources + case addrs.DataResourceMode: + into = dataResources + default: + panic(fmt.Errorf("unsupported ResourceMode %s", subj.Mode)) + } + + val, valDiags := normalizeRefValue(s.Data.GetResource(subj, rng)) + diags = diags.Append(valDiags) + + r := subj + if into[r.Type] == nil { + into[r.Type] = make(map[string]cty.Value) + } + into[r.Type][r.Name] = val + + case addrs.ModuleCall: + val, valDiags := normalizeRefValue(s.Data.GetModule(subj, rng)) + diags = diags.Append(valDiags) + wholeModules[subj.Name] = val + + case addrs.InputVariable: + val, valDiags := normalizeRefValue(s.Data.GetInputVariable(subj, rng)) + diags = diags.Append(valDiags) + inputVariables[subj.Name] = val + + case addrs.LocalValue: + val, valDiags := normalizeRefValue(s.Data.GetLocalValue(subj, rng)) + diags = diags.Append(valDiags) + localValues[subj.Name] = val + + case addrs.PathAttr: + val, valDiags := normalizeRefValue(s.Data.GetPathAttr(subj, rng)) + diags = diags.Append(valDiags) + pathAttrs[subj.Name] = val + + case addrs.TerraformAttr: + val, valDiags := normalizeRefValue(s.Data.GetTerraformAttr(subj, rng)) + diags = diags.Append(valDiags) + terraformAttrs[subj.Name] = val + + case addrs.CountAttr: + val, valDiags := normalizeRefValue(s.Data.GetCountAttr(subj, rng)) + diags = diags.Append(valDiags) + countAttrs[subj.Name] = val + + case addrs.ForEachAttr: + val, valDiags := normalizeRefValue(s.Data.GetForEachAttr(subj, rng)) + diags = diags.Append(valDiags) + forEachAttrs[subj.Name] = val + + default: + // Should never happen + panic(fmt.Errorf("Scope.buildEvalContext cannot handle 
address type %T", rawSubj)) + } + } + + // Managed resources are exposed in two different locations. The primary + // is at the top level where the resource type name is the root of the + // traversal, but we also expose them under "resource" as an escaping + // technique if we add a reserved name in a future language edition which + // conflicts with someone's existing provider. + for k, v := range buildResourceObjects(managedResources) { + vals[k] = v + } + vals["resource"] = cty.ObjectVal(buildResourceObjects(managedResources)) + + vals["data"] = cty.ObjectVal(buildResourceObjects(dataResources)) + vals["module"] = cty.ObjectVal(wholeModules) + vals["var"] = cty.ObjectVal(inputVariables) + vals["local"] = cty.ObjectVal(localValues) + vals["path"] = cty.ObjectVal(pathAttrs) + vals["terraform"] = cty.ObjectVal(terraformAttrs) + vals["count"] = cty.ObjectVal(countAttrs) + vals["each"] = cty.ObjectVal(forEachAttrs) + if self != cty.NilVal { + vals["self"] = self + } + + return ctx, diags +} + +func buildResourceObjects(resources map[string]map[string]cty.Value) map[string]cty.Value { + vals := make(map[string]cty.Value) + for typeName, nameVals := range resources { + vals[typeName] = cty.ObjectVal(nameVals) + } + return vals +} + +func normalizeRefValue(val cty.Value, diags tfdiags.Diagnostics) (cty.Value, tfdiags.Diagnostics) { + if diags.HasErrors() { + // If there are errors then we will force an unknown result so that + // we can still evaluate and catch type errors but we'll avoid + // producing redundant re-statements of the same errors we've already + // dealt with here. 
+ return cty.UnknownVal(val.Type()), diags + } + return val, diags +} diff --git a/internal/terraform/lang/eval_test.go b/internal/terraform/lang/eval_test.go new file mode 100644 index 00000000..395aed73 --- /dev/null +++ b/internal/terraform/lang/eval_test.go @@ -0,0 +1,844 @@ +package lang + +import ( + "bytes" + "encoding/json" + "testing" + + "github.com/camptocamp/terraboard/internal/terraform/addrs" + "github.com/camptocamp/terraboard/internal/terraform/configs/configschema" + "github.com/camptocamp/terraboard/internal/terraform/instances" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" +) + +func TestScopeEvalContext(t *testing.T) { + data := &dataForTests{ + CountAttrs: map[string]cty.Value{ + "index": cty.NumberIntVal(0), + }, + ForEachAttrs: map[string]cty.Value{ + "key": cty.StringVal("a"), + "value": cty.NumberIntVal(1), + }, + Resources: map[string]cty.Value{ + "null_resource.foo": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + "data.null_data_source.foo": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + "null_resource.multi": cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("multi0"), + }), + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("multi1"), + }), + }), + "null_resource.each": cty.ObjectVal(map[string]cty.Value{ + "each0": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("each0"), + }), + "each1": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("each1"), + }), + }), + "null_resource.multi[1]": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("multi1"), + }), + }, + LocalValues: map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }, + Modules: map[string]cty.Value{ + "module.foo": cty.ObjectVal(map[string]cty.Value{ + "output0": cty.StringVal("bar0"), + "output1": cty.StringVal("bar1"), + }), 
+ }, + PathAttrs: map[string]cty.Value{ + "module": cty.StringVal("foo/bar"), + }, + TerraformAttrs: map[string]cty.Value{ + "workspace": cty.StringVal("default"), + }, + InputVariables: map[string]cty.Value{ + "baz": cty.StringVal("boop"), + }, + } + + tests := []struct { + Expr string + Want map[string]cty.Value + }{ + { + `12`, + map[string]cty.Value{}, + }, + { + `count.index`, + map[string]cty.Value{ + "count": cty.ObjectVal(map[string]cty.Value{ + "index": cty.NumberIntVal(0), + }), + }, + }, + { + `each.key`, + map[string]cty.Value{ + "each": cty.ObjectVal(map[string]cty.Value{ + "key": cty.StringVal("a"), + }), + }, + }, + { + `each.value`, + map[string]cty.Value{ + "each": cty.ObjectVal(map[string]cty.Value{ + "value": cty.NumberIntVal(1), + }), + }, + }, + { + `local.foo`, + map[string]cty.Value{ + "local": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + }, + }, + { + `null_resource.foo`, + map[string]cty.Value{ + "null_resource": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + }), + "resource": cty.ObjectVal(map[string]cty.Value{ + "null_resource": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + }), + }), + }, + }, + { + `null_resource.foo.attr`, + map[string]cty.Value{ + "null_resource": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + }), + "resource": cty.ObjectVal(map[string]cty.Value{ + "null_resource": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + }), + }), + }, + }, + { + `null_resource.multi`, + map[string]cty.Value{ + "null_resource": cty.ObjectVal(map[string]cty.Value{ + "multi": cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("multi0"), + }), + cty.ObjectVal(map[string]cty.Value{ + "attr": 
cty.StringVal("multi1"), + }), + }), + }), + "resource": cty.ObjectVal(map[string]cty.Value{ + "null_resource": cty.ObjectVal(map[string]cty.Value{ + "multi": cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("multi0"), + }), + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("multi1"), + }), + }), + }), + }), + }, + }, + { + // at this level, all instance references return the entire resource + `null_resource.multi[1]`, + map[string]cty.Value{ + "null_resource": cty.ObjectVal(map[string]cty.Value{ + "multi": cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("multi0"), + }), + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("multi1"), + }), + }), + }), + "resource": cty.ObjectVal(map[string]cty.Value{ + "null_resource": cty.ObjectVal(map[string]cty.Value{ + "multi": cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("multi0"), + }), + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("multi1"), + }), + }), + }), + }), + }, + }, + { + // at this level, all instance references return the entire resource + `null_resource.each["each1"]`, + map[string]cty.Value{ + "null_resource": cty.ObjectVal(map[string]cty.Value{ + "each": cty.ObjectVal(map[string]cty.Value{ + "each0": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("each0"), + }), + "each1": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("each1"), + }), + }), + }), + "resource": cty.ObjectVal(map[string]cty.Value{ + "null_resource": cty.ObjectVal(map[string]cty.Value{ + "each": cty.ObjectVal(map[string]cty.Value{ + "each0": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("each0"), + }), + "each1": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("each1"), + }), + }), + }), + }), + }, + }, + { + // at this level, all instance references return the entire resource + `null_resource.each["each1"].attr`, + map[string]cty.Value{ + 
"null_resource": cty.ObjectVal(map[string]cty.Value{ + "each": cty.ObjectVal(map[string]cty.Value{ + "each0": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("each0"), + }), + "each1": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("each1"), + }), + }), + }), + "resource": cty.ObjectVal(map[string]cty.Value{ + "null_resource": cty.ObjectVal(map[string]cty.Value{ + "each": cty.ObjectVal(map[string]cty.Value{ + "each0": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("each0"), + }), + "each1": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("each1"), + }), + }), + }), + }), + }, + }, + { + `foo(null_resource.multi, null_resource.multi[1])`, + map[string]cty.Value{ + "null_resource": cty.ObjectVal(map[string]cty.Value{ + "multi": cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("multi0"), + }), + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("multi1"), + }), + }), + }), + "resource": cty.ObjectVal(map[string]cty.Value{ + "null_resource": cty.ObjectVal(map[string]cty.Value{ + "multi": cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("multi0"), + }), + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("multi1"), + }), + }), + }), + }), + }, + }, + { + `data.null_data_source.foo`, + map[string]cty.Value{ + "data": cty.ObjectVal(map[string]cty.Value{ + "null_data_source": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + }), + }), + }, + }, + { + `module.foo`, + map[string]cty.Value{ + "module": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "output0": cty.StringVal("bar0"), + "output1": cty.StringVal("bar1"), + }), + }), + }, + }, + // any module reference returns the entire module + { + `module.foo.output1`, + map[string]cty.Value{ + "module": cty.ObjectVal(map[string]cty.Value{ + "foo": 
cty.ObjectVal(map[string]cty.Value{ + "output0": cty.StringVal("bar0"), + "output1": cty.StringVal("bar1"), + }), + }), + }, + }, + { + `path.module`, + map[string]cty.Value{ + "path": cty.ObjectVal(map[string]cty.Value{ + "module": cty.StringVal("foo/bar"), + }), + }, + }, + { + `self.baz`, + map[string]cty.Value{ + "self": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("multi1"), + }), + }, + }, + { + `terraform.workspace`, + map[string]cty.Value{ + "terraform": cty.ObjectVal(map[string]cty.Value{ + "workspace": cty.StringVal("default"), + }), + }, + }, + { + `var.baz`, + map[string]cty.Value{ + "var": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("boop"), + }), + }, + }, + } + + for _, test := range tests { + t.Run(test.Expr, func(t *testing.T) { + expr, parseDiags := hclsyntax.ParseExpression([]byte(test.Expr), "", hcl.Pos{Line: 1, Column: 1}) + if len(parseDiags) != 0 { + t.Errorf("unexpected diagnostics during parse") + for _, diag := range parseDiags { + t.Errorf("- %s", diag) + } + return + } + + refs, refsDiags := ReferencesInExpr(expr) + if refsDiags.HasErrors() { + t.Fatal(refsDiags.Err()) + } + + scope := &Scope{ + Data: data, + + // "self" will just be an arbitrary one of the several resource + // instances we have in our test dataset. + SelfAddr: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "multi", + }, + Key: addrs.IntKey(1), + }, + } + ctx, ctxDiags := scope.EvalContext(refs) + if ctxDiags.HasErrors() { + t.Fatal(ctxDiags.Err()) + } + + // For easier test assertions we'll just remove any top-level + // empty objects from our variables map. 
+ for k, v := range ctx.Variables { + if v.RawEquals(cty.EmptyObjectVal) { + delete(ctx.Variables, k) + } + } + + gotVal := cty.ObjectVal(ctx.Variables) + wantVal := cty.ObjectVal(test.Want) + + if !gotVal.RawEquals(wantVal) { + // We'll JSON-ize our values here just so it's easier to + // read them in the assertion output. + gotJSON := formattedJSONValue(gotVal) + wantJSON := formattedJSONValue(wantVal) + + t.Errorf( + "wrong result\nexpr: %s\ngot: %s\nwant: %s", + test.Expr, gotJSON, wantJSON, + ) + } + }) + } +} + +func TestScopeExpandEvalBlock(t *testing.T) { + nestedObjTy := cty.Object(map[string]cty.Type{ + "boop": cty.String, + }) + schema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + "list_of_obj": {Type: cty.List(nestedObjTy), Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "bar": { + Nesting: configschema.NestingMap, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "baz": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + data := &dataForTests{ + LocalValues: map[string]cty.Value{ + "greeting": cty.StringVal("howdy"), + "list": cty.ListVal([]cty.Value{ + cty.StringVal("elem0"), + cty.StringVal("elem1"), + }), + "map": cty.MapVal(map[string]cty.Value{ + "key1": cty.StringVal("val1"), + "key2": cty.StringVal("val2"), + }), + }, + } + + tests := map[string]struct { + Config string + Want cty.Value + }{ + "empty": { + ` + `, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(cty.String), + "list_of_obj": cty.NullVal(cty.List(nestedObjTy)), + "bar": cty.MapValEmpty(cty.Object(map[string]cty.Type{ + "baz": cty.String, + })), + }), + }, + "literal attribute": { + ` + foo = "hello" + `, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("hello"), + "list_of_obj": cty.NullVal(cty.List(nestedObjTy)), + "bar": cty.MapValEmpty(cty.Object(map[string]cty.Type{ + "baz": cty.String, + })), + }), + }, + 
"variable attribute": { + ` + foo = local.greeting + `, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("howdy"), + "list_of_obj": cty.NullVal(cty.List(nestedObjTy)), + "bar": cty.MapValEmpty(cty.Object(map[string]cty.Type{ + "baz": cty.String, + })), + }), + }, + "one static block": { + ` + bar "static" {} + `, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(cty.String), + "list_of_obj": cty.NullVal(cty.List(nestedObjTy)), + "bar": cty.MapVal(map[string]cty.Value{ + "static": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.NullVal(cty.String), + }), + }), + }), + }, + "two static blocks": { + ` + bar "static0" { + baz = 0 + } + bar "static1" { + baz = 1 + } + `, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(cty.String), + "list_of_obj": cty.NullVal(cty.List(nestedObjTy)), + "bar": cty.MapVal(map[string]cty.Value{ + "static0": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("0"), + }), + "static1": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("1"), + }), + }), + }), + }, + "dynamic blocks from list": { + ` + dynamic "bar" { + for_each = local.list + labels = [bar.value] + content { + baz = bar.key + } + } + `, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(cty.String), + "list_of_obj": cty.NullVal(cty.List(nestedObjTy)), + "bar": cty.MapVal(map[string]cty.Value{ + "elem0": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("0"), + }), + "elem1": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("1"), + }), + }), + }), + }, + "dynamic blocks from map": { + ` + dynamic "bar" { + for_each = local.map + labels = [bar.key] + content { + baz = bar.value + } + } + `, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(cty.String), + "list_of_obj": cty.NullVal(cty.List(nestedObjTy)), + "bar": cty.MapVal(map[string]cty.Value{ + "key1": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("val1"), + }), + "key2": cty.ObjectVal(map[string]cty.Value{ + "baz": 
cty.StringVal("val2"), + }), + }), + }), + }, + "list-of-object attribute": { + ` + list_of_obj = [ + { + boop = local.greeting + }, + ] + `, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(cty.String), + "list_of_obj": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "boop": cty.StringVal("howdy"), + }), + }), + "bar": cty.MapValEmpty(cty.Object(map[string]cty.Type{ + "baz": cty.String, + })), + }), + }, + "list-of-object attribute as blocks": { + ` + list_of_obj { + boop = local.greeting + } + `, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(cty.String), + "list_of_obj": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "boop": cty.StringVal("howdy"), + }), + }), + "bar": cty.MapValEmpty(cty.Object(map[string]cty.Type{ + "baz": cty.String, + })), + }), + }, + "lots of things at once": { + ` + foo = "whoop" + bar "static0" { + baz = "s0" + } + dynamic "bar" { + for_each = local.list + labels = [bar.value] + content { + baz = bar.key + } + } + bar "static1" { + baz = "s1" + } + dynamic "bar" { + for_each = local.map + labels = [bar.key] + content { + baz = bar.value + } + } + bar "static2" { + baz = "s2" + } + `, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("whoop"), + "list_of_obj": cty.NullVal(cty.List(nestedObjTy)), + "bar": cty.MapVal(map[string]cty.Value{ + "key1": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("val1"), + }), + "key2": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("val2"), + }), + "elem0": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("0"), + }), + "elem1": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("1"), + }), + "static0": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("s0"), + }), + "static1": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("s1"), + }), + "static2": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("s2"), + }), + }), + }), + }, + } + + for name, test := range tests { + 
t.Run(name, func(t *testing.T) { + file, parseDiags := hclsyntax.ParseConfig([]byte(test.Config), "", hcl.Pos{Line: 1, Column: 1}) + if len(parseDiags) != 0 { + t.Errorf("unexpected diagnostics during parse") + for _, diag := range parseDiags { + t.Errorf("- %s", diag) + } + return + } + + body := file.Body + scope := &Scope{ + Data: data, + } + + body, expandDiags := scope.ExpandBlock(body, schema) + if expandDiags.HasErrors() { + t.Fatal(expandDiags.Err()) + } + + got, valDiags := scope.EvalBlock(body, schema) + if valDiags.HasErrors() { + t.Fatal(valDiags.Err()) + } + + if !got.RawEquals(test.Want) { + // We'll JSON-ize our values here just so it's easier to + // read them in the assertion output. + gotJSON := formattedJSONValue(got) + wantJSON := formattedJSONValue(test.Want) + + t.Errorf( + "wrong result\nconfig: %s\ngot: %s\nwant: %s", + test.Config, gotJSON, wantJSON, + ) + } + + }) + } + +} + +func formattedJSONValue(val cty.Value) string { + val = cty.UnknownAsNull(val) // since JSON can't represent unknowns + j, err := ctyjson.Marshal(val, val.Type()) + if err != nil { + panic(err) + } + var buf bytes.Buffer + json.Indent(&buf, j, "", " ") + return buf.String() +} + +func TestScopeEvalSelfBlock(t *testing.T) { + data := &dataForTests{ + PathAttrs: map[string]cty.Value{ + "module": cty.StringVal("foo/bar"), + "cwd": cty.StringVal("/home/foo/bar"), + "root": cty.StringVal("/home/foo"), + }, + TerraformAttrs: map[string]cty.Value{ + "workspace": cty.StringVal("default"), + }, + } + schema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "attr": { + Type: cty.String, + }, + "num": { + Type: cty.Number, + }, + }, + } + + tests := []struct { + Config string + Self cty.Value + KeyData instances.RepetitionData + Want map[string]cty.Value + }{ + { + Config: `attr = self.foo`, + Self: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + KeyData: instances.RepetitionData{ + CountIndex: cty.NumberIntVal(0), + }, + 
Want: map[string]cty.Value{ + "attr": cty.StringVal("bar"), + "num": cty.NullVal(cty.Number), + }, + }, + { + Config: `num = count.index`, + KeyData: instances.RepetitionData{ + CountIndex: cty.NumberIntVal(0), + }, + Want: map[string]cty.Value{ + "attr": cty.NullVal(cty.String), + "num": cty.NumberIntVal(0), + }, + }, + { + Config: `attr = each.key`, + KeyData: instances.RepetitionData{ + EachKey: cty.StringVal("a"), + }, + Want: map[string]cty.Value{ + "attr": cty.StringVal("a"), + "num": cty.NullVal(cty.Number), + }, + }, + { + Config: `attr = path.cwd`, + Want: map[string]cty.Value{ + "attr": cty.StringVal("/home/foo/bar"), + "num": cty.NullVal(cty.Number), + }, + }, + { + Config: `attr = path.module`, + Want: map[string]cty.Value{ + "attr": cty.StringVal("foo/bar"), + "num": cty.NullVal(cty.Number), + }, + }, + { + Config: `attr = path.root`, + Want: map[string]cty.Value{ + "attr": cty.StringVal("/home/foo"), + "num": cty.NullVal(cty.Number), + }, + }, + { + Config: `attr = terraform.workspace`, + Want: map[string]cty.Value{ + "attr": cty.StringVal("default"), + "num": cty.NullVal(cty.Number), + }, + }, + } + + for _, test := range tests { + t.Run(test.Config, func(t *testing.T) { + file, parseDiags := hclsyntax.ParseConfig([]byte(test.Config), "", hcl.Pos{Line: 1, Column: 1}) + if len(parseDiags) != 0 { + t.Errorf("unexpected diagnostics during parse") + for _, diag := range parseDiags { + t.Errorf("- %s", diag) + } + return + } + + body := file.Body + + scope := &Scope{ + Data: data, + } + + gotVal, ctxDiags := scope.EvalSelfBlock(body, test.Self, schema, test.KeyData) + if ctxDiags.HasErrors() { + t.Fatal(ctxDiags.Err()) + } + + wantVal := cty.ObjectVal(test.Want) + + if !gotVal.RawEquals(wantVal) { + t.Errorf( + "wrong result\nexpr: %s\ngot: %#v\nwant: %#v", + test.Config, gotVal, wantVal, + ) + } + }) + } +} diff --git a/internal/terraform/lang/funcs/cidr.go b/internal/terraform/lang/funcs/cidr.go new file mode 100644 index 00000000..e482cf3a --- 
/dev/null +++ b/internal/terraform/lang/funcs/cidr.go @@ -0,0 +1,215 @@ +package funcs + +import ( + "fmt" + "math/big" + + "github.com/apparentlymart/go-cidr/cidr" + "github.com/camptocamp/terraboard/internal/terraform/ipaddr" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/gocty" +) + +// CidrHostFunc constructs a function that calculates a full host IP address +// within a given IP network address prefix. +var CidrHostFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "prefix", + Type: cty.String, + }, + { + Name: "hostnum", + Type: cty.Number, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var hostNum *big.Int + if err := gocty.FromCtyValue(args[1], &hostNum); err != nil { + return cty.UnknownVal(cty.String), err + } + _, network, err := ipaddr.ParseCIDR(args[0].AsString()) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err) + } + + ip, err := cidr.HostBig(network, hostNum) + if err != nil { + return cty.UnknownVal(cty.String), err + } + + return cty.StringVal(ip.String()), nil + }, +}) + +// CidrNetmaskFunc constructs a function that converts an IPv4 address prefix given +// in CIDR notation into a subnet mask address. 
+var CidrNetmaskFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "prefix", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + _, network, err := ipaddr.ParseCIDR(args[0].AsString()) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err) + } + + if network.IP.To4() == nil { + return cty.UnknownVal(cty.String), fmt.Errorf("IPv6 addresses cannot have a netmask: %s", args[0].AsString()) + } + + return cty.StringVal(ipaddr.IP(network.Mask).String()), nil + }, +}) + +// CidrSubnetFunc constructs a function that calculates a subnet address within +// a given IP network address prefix. +var CidrSubnetFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "prefix", + Type: cty.String, + }, + { + Name: "newbits", + Type: cty.Number, + }, + { + Name: "netnum", + Type: cty.Number, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var newbits int + if err := gocty.FromCtyValue(args[1], &newbits); err != nil { + return cty.UnknownVal(cty.String), err + } + var netnum *big.Int + if err := gocty.FromCtyValue(args[2], &netnum); err != nil { + return cty.UnknownVal(cty.String), err + } + + _, network, err := ipaddr.ParseCIDR(args[0].AsString()) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err) + } + + newNetwork, err := cidr.SubnetBig(network, newbits, netnum) + if err != nil { + return cty.UnknownVal(cty.String), err + } + + return cty.StringVal(newNetwork.String()), nil + }, +}) + +// CidrSubnetsFunc is similar to CidrSubnetFunc but calculates many consecutive +// subnet addresses at once, rather than just a single subnet extension. 
+var CidrSubnetsFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "prefix", + Type: cty.String, + }, + }, + VarParam: &function.Parameter{ + Name: "newbits", + Type: cty.Number, + }, + Type: function.StaticReturnType(cty.List(cty.String)), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + _, network, err := ipaddr.ParseCIDR(args[0].AsString()) + if err != nil { + return cty.UnknownVal(cty.String), function.NewArgErrorf(0, "invalid CIDR expression: %s", err) + } + startPrefixLen, _ := network.Mask.Size() + + prefixLengthArgs := args[1:] + if len(prefixLengthArgs) == 0 { + return cty.ListValEmpty(cty.String), nil + } + + var firstLength int + if err := gocty.FromCtyValue(prefixLengthArgs[0], &firstLength); err != nil { + return cty.UnknownVal(cty.String), function.NewArgError(1, err) + } + firstLength += startPrefixLen + + retVals := make([]cty.Value, len(prefixLengthArgs)) + + current, _ := cidr.PreviousSubnet(network, firstLength) + for i, lengthArg := range prefixLengthArgs { + var length int + if err := gocty.FromCtyValue(lengthArg, &length); err != nil { + return cty.UnknownVal(cty.String), function.NewArgError(i+1, err) + } + + if length < 1 { + return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "must extend prefix by at least one bit") + } + // For portability with 32-bit systems where the subnet number + // will be a 32-bit int, we only allow extension of 32 bits in + // one call even if we're running on a 64-bit machine. + // (Of course, this is significant only for IPv6.) 
+ if length > 32 { + return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "may not extend prefix by more than 32 bits") + } + length += startPrefixLen + if length > (len(network.IP) * 8) { + protocol := "IP" + switch len(network.IP) * 8 { + case 32: + protocol = "IPv4" + case 128: + protocol = "IPv6" + } + return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "would extend prefix to %d bits, which is too long for an %s address", length, protocol) + } + + next, rollover := cidr.NextSubnet(current, length) + if rollover || !network.Contains(next.IP) { + // If we run out of suffix bits in the base CIDR prefix then + // NextSubnet will start incrementing the prefix bits, which + // we don't allow because it would then allocate addresses + // outside of the caller's given prefix. + return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "not enough remaining address space for a subnet with a prefix of %d bits after %s", length, current.String()) + } + + current = next + retVals[i] = cty.StringVal(current.String()) + } + + return cty.ListVal(retVals), nil + }, +}) + +// CidrHost calculates a full host IP address within a given IP network address prefix. +func CidrHost(prefix, hostnum cty.Value) (cty.Value, error) { + return CidrHostFunc.Call([]cty.Value{prefix, hostnum}) +} + +// CidrNetmask converts an IPv4 address prefix given in CIDR notation into a subnet mask address. +func CidrNetmask(prefix cty.Value) (cty.Value, error) { + return CidrNetmaskFunc.Call([]cty.Value{prefix}) +} + +// CidrSubnet calculates a subnet address within a given IP network address prefix. +func CidrSubnet(prefix, newbits, netnum cty.Value) (cty.Value, error) { + return CidrSubnetFunc.Call([]cty.Value{prefix, newbits, netnum}) +} + +// CidrSubnets calculates a sequence of consecutive subnet prefixes that may +// be of different prefix lengths under a common base prefix. 
+func CidrSubnets(prefix cty.Value, newbits ...cty.Value) (cty.Value, error) { + args := make([]cty.Value, len(newbits)+1) + args[0] = prefix + copy(args[1:], newbits) + return CidrSubnetsFunc.Call(args) +} diff --git a/internal/terraform/lang/funcs/cidr_test.go b/internal/terraform/lang/funcs/cidr_test.go new file mode 100644 index 00000000..5d8a9605 --- /dev/null +++ b/internal/terraform/lang/funcs/cidr_test.go @@ -0,0 +1,395 @@ +package funcs + +import ( + "fmt" + "testing" + + "github.com/zclconf/go-cty/cty" +) + +func TestCidrHost(t *testing.T) { + tests := []struct { + Prefix cty.Value + Hostnum cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("192.168.1.0/24"), + cty.NumberIntVal(5), + cty.StringVal("192.168.1.5"), + false, + }, + { + cty.StringVal("192.168.1.0/24"), + cty.NumberIntVal(-5), + cty.StringVal("192.168.1.251"), + false, + }, + { + cty.StringVal("192.168.1.0/24"), + cty.NumberIntVal(-256), + cty.StringVal("192.168.1.0"), + false, + }, + { + // We inadvertently inherited a pre-Go1.17 standard library quirk + // if parsing zero-prefix parts as decimal rather than octal. + // Go 1.17 resolved that quirk by making zero-prefix invalid, but + // we've preserved our existing behavior for backward compatibility, + // on the grounds that these functions are for generating addresses + // rather than validating or processing them. We do always generate + // a canonical result regardless of the input, though. 
+ cty.StringVal("010.001.0.0/24"), + cty.NumberIntVal(6), + cty.StringVal("10.1.0.6"), + false, + }, + { + cty.StringVal("192.168.1.0/30"), + cty.NumberIntVal(255), + cty.UnknownVal(cty.String), + true, // 255 doesn't fit in two bits + }, + { + cty.StringVal("192.168.1.0/30"), + cty.NumberIntVal(-255), + cty.UnknownVal(cty.String), + true, // 255 doesn't fit in two bits + }, + { + cty.StringVal("not-a-cidr"), + cty.NumberIntVal(6), + cty.UnknownVal(cty.String), + true, // not a valid CIDR mask + }, + { + cty.StringVal("10.256.0.0/8"), + cty.NumberIntVal(6), + cty.UnknownVal(cty.String), + true, // can't have an octet >255 + }, + { // fractions are Not Ok + cty.StringVal("10.256.0.0/8"), + cty.NumberFloatVal(.75), + cty.UnknownVal(cty.String), + true, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("cidrhost(%#v, %#v)", test.Prefix, test.Hostnum), func(t *testing.T) { + got, err := CidrHost(test.Prefix, test.Hostnum) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestCidrNetmask(t *testing.T) { + tests := []struct { + Prefix cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("192.168.1.0/24"), + cty.StringVal("255.255.255.0"), + false, + }, + { + cty.StringVal("192.168.1.0/32"), + cty.StringVal("255.255.255.255"), + false, + }, + { + cty.StringVal("0.0.0.0/0"), + cty.StringVal("0.0.0.0"), + false, + }, + { + // We inadvertently inherited a pre-Go1.17 standard library quirk + // if parsing zero-prefix parts as decimal rather than octal. + // Go 1.17 resolved that quirk by making zero-prefix invalid, but + // we've preserved our existing behavior for backward compatibility, + // on the grounds that these functions are for generating addresses + // rather than validating or processing them. 
+ cty.StringVal("010.001.0.0/24"), + cty.StringVal("255.255.255.0"), + false, + }, + { + cty.StringVal("not-a-cidr"), + cty.UnknownVal(cty.String), + true, // not a valid CIDR mask + }, + { + cty.StringVal("110.256.0.0/8"), + cty.UnknownVal(cty.String), + true, // can't have an octet >255 + }, + { + cty.StringVal("1::/64"), + cty.UnknownVal(cty.String), + true, // IPv6 is invalid + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("cidrnetmask(%#v)", test.Prefix), func(t *testing.T) { + got, err := CidrNetmask(test.Prefix) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestCidrSubnet(t *testing.T) { + tests := []struct { + Prefix cty.Value + Newbits cty.Value + Netnum cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("192.168.2.0/20"), + cty.NumberIntVal(4), + cty.NumberIntVal(6), + cty.StringVal("192.168.6.0/24"), + false, + }, + { + cty.StringVal("fe80::/48"), + cty.NumberIntVal(16), + cty.NumberIntVal(6), + cty.StringVal("fe80:0:0:6::/64"), + false, + }, + { // IPv4 address encoded in IPv6 syntax gets normalized + cty.StringVal("::ffff:192.168.0.0/112"), + cty.NumberIntVal(8), + cty.NumberIntVal(6), + cty.StringVal("192.168.6.0/24"), + false, + }, + { + cty.StringVal("fe80::/48"), + cty.NumberIntVal(33), + cty.NumberIntVal(6), + cty.StringVal("fe80::3:0:0:0/81"), + false, + }, + { + // We inadvertently inherited a pre-Go1.17 standard library quirk + // if parsing zero-prefix parts as decimal rather than octal. + // Go 1.17 resolved that quirk by making zero-prefix invalid, but + // we've preserved our existing behavior for backward compatibility, + // on the grounds that these functions are for generating addresses + // rather than validating or processing them. 
We do always generate + // a canonical result regardless of the input, though. + cty.StringVal("010.001.0.0/24"), + cty.NumberIntVal(4), + cty.NumberIntVal(1), + cty.StringVal("10.1.0.16/28"), + false, + }, + { // not enough bits left + cty.StringVal("192.168.0.0/30"), + cty.NumberIntVal(4), + cty.NumberIntVal(6), + cty.UnknownVal(cty.String), + true, + }, + { // can't encode 16 in 2 bits + cty.StringVal("192.168.0.0/168"), + cty.NumberIntVal(2), + cty.NumberIntVal(16), + cty.UnknownVal(cty.String), + true, + }, + { // not a valid CIDR mask + cty.StringVal("not-a-cidr"), + cty.NumberIntVal(4), + cty.NumberIntVal(6), + cty.UnknownVal(cty.String), + true, + }, + { // can't have an octet >255 + cty.StringVal("10.256.0.0/8"), + cty.NumberIntVal(4), + cty.NumberIntVal(6), + cty.UnknownVal(cty.String), + true, + }, + { // fractions are Not Ok + cty.StringVal("10.256.0.0/8"), + cty.NumberFloatVal(2.0 / 3.0), + cty.NumberFloatVal(.75), + cty.UnknownVal(cty.String), + true, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("cidrsubnet(%#v, %#v, %#v)", test.Prefix, test.Newbits, test.Netnum), func(t *testing.T) { + got, err := CidrSubnet(test.Prefix, test.Newbits, test.Netnum) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} +func TestCidrSubnets(t *testing.T) { + tests := []struct { + Prefix cty.Value + Newbits []cty.Value + Want cty.Value + Err string + }{ + { + cty.StringVal("10.0.0.0/21"), + []cty.Value{ + cty.NumberIntVal(3), + cty.NumberIntVal(3), + cty.NumberIntVal(3), + cty.NumberIntVal(4), + cty.NumberIntVal(4), + cty.NumberIntVal(4), + cty.NumberIntVal(7), + cty.NumberIntVal(7), + cty.NumberIntVal(7), + }, + cty.ListVal([]cty.Value{ + cty.StringVal("10.0.0.0/24"), + cty.StringVal("10.0.1.0/24"), + cty.StringVal("10.0.2.0/24"), + 
cty.StringVal("10.0.3.0/25"), + cty.StringVal("10.0.3.128/25"), + cty.StringVal("10.0.4.0/25"), + cty.StringVal("10.0.4.128/28"), + cty.StringVal("10.0.4.144/28"), + cty.StringVal("10.0.4.160/28"), + }), + ``, + }, + { + // We inadvertently inherited a pre-Go1.17 standard library quirk + // if parsing zero-prefix parts as decimal rather than octal. + // Go 1.17 resolved that quirk by making zero-prefix invalid, but + // we've preserved our existing behavior for backward compatibility, + // on the grounds that these functions are for generating addresses + // rather than validating or processing them. We do always generate + // a canonical result regardless of the input, though. + cty.StringVal("010.0.0.0/21"), + []cty.Value{ + cty.NumberIntVal(3), + }, + cty.ListVal([]cty.Value{ + cty.StringVal("10.0.0.0/24"), + }), + ``, + }, + { + cty.StringVal("10.0.0.0/30"), + []cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(3), + }, + cty.UnknownVal(cty.List(cty.String)), + `would extend prefix to 33 bits, which is too long for an IPv4 address`, + }, + { + cty.StringVal("10.0.0.0/8"), + []cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(1), + cty.NumberIntVal(1), + }, + cty.UnknownVal(cty.List(cty.String)), + `not enough remaining address space for a subnet with a prefix of 9 bits after 10.128.0.0/9`, + }, + { + cty.StringVal("10.0.0.0/8"), + []cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(0), + }, + cty.UnknownVal(cty.List(cty.String)), + `must extend prefix by at least one bit`, + }, + { + cty.StringVal("10.0.0.0/8"), + []cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(-1), + }, + cty.UnknownVal(cty.List(cty.String)), + `must extend prefix by at least one bit`, + }, + { + cty.StringVal("fe80::/48"), + []cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(33), + }, + cty.UnknownVal(cty.List(cty.String)), + `may not extend prefix by more than 32 bits`, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("cidrsubnets(%#v, %#v)", test.Prefix, 
test.Newbits), func(t *testing.T) { + got, err := CidrSubnets(test.Prefix, test.Newbits...) + wantErr := test.Err != "" + + if wantErr { + if err == nil { + t.Fatal("succeeded; want error") + } + if err.Error() != test.Err { + t.Fatalf("wrong error\ngot: %s\nwant: %s", err.Error(), test.Err) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} diff --git a/internal/terraform/lang/funcs/collection.go b/internal/terraform/lang/funcs/collection.go new file mode 100644 index 00000000..2b772f35 --- /dev/null +++ b/internal/terraform/lang/funcs/collection.go @@ -0,0 +1,713 @@ +package funcs + +import ( + "errors" + "fmt" + "math/big" + "sort" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/function/stdlib" + "github.com/zclconf/go-cty/cty/gocty" +) + +var LengthFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "value", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + AllowUnknown: true, + AllowMarked: true, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + collTy := args[0].Type() + switch { + case collTy == cty.String || collTy.IsTupleType() || collTy.IsObjectType() || collTy.IsListType() || collTy.IsMapType() || collTy.IsSetType() || collTy == cty.DynamicPseudoType: + return cty.Number, nil + default: + return cty.Number, errors.New("argument must be a string, a collection type, or a structural type") + } + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + coll := args[0] + collTy := args[0].Type() + marks := coll.Marks() + switch { + case collTy == cty.DynamicPseudoType: + return cty.UnknownVal(cty.Number).WithMarks(marks), nil + case collTy.IsTupleType(): + l := len(collTy.TupleElementTypes()) + return 
cty.NumberIntVal(int64(l)).WithMarks(marks), nil + case collTy.IsObjectType(): + l := len(collTy.AttributeTypes()) + return cty.NumberIntVal(int64(l)).WithMarks(marks), nil + case collTy == cty.String: + // We'll delegate to the cty stdlib strlen function here, because + // it deals with all of the complexities of tokenizing unicode + // grapheme clusters. + return stdlib.Strlen(coll) + case collTy.IsListType() || collTy.IsSetType() || collTy.IsMapType(): + return coll.Length(), nil + default: + // Should never happen, because of the checks in our Type func above + return cty.UnknownVal(cty.Number), errors.New("impossible value type for length(...)") + } + }, +}) + +// AllTrueFunc constructs a function that returns true if all elements of the +// list are true. If the list is empty, return true. +var AllTrueFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.List(cty.Bool), + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + result := cty.True + for it := args[0].ElementIterator(); it.Next(); { + _, v := it.Element() + if !v.IsKnown() { + return cty.UnknownVal(cty.Bool), nil + } + if v.IsNull() { + return cty.False, nil + } + result = result.And(v) + if result.False() { + return cty.False, nil + } + } + return result, nil + }, +}) + +// AnyTrueFunc constructs a function that returns true if any element of the +// list is true. If the list is empty, return false. 
+var AnyTrueFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.List(cty.Bool), + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + result := cty.False + var hasUnknown bool + for it := args[0].ElementIterator(); it.Next(); { + _, v := it.Element() + if !v.IsKnown() { + hasUnknown = true + continue + } + if v.IsNull() { + continue + } + result = result.Or(v) + if result.True() { + return cty.True, nil + } + } + if hasUnknown { + return cty.UnknownVal(cty.Bool), nil + } + return result, nil + }, +}) + +// CoalesceFunc constructs a function that takes any number of arguments and +// returns the first one that isn't empty. This function was copied from go-cty +// stdlib and modified so that it returns the first *non-empty* non-null element +// from a sequence, instead of merely the first non-null. +var CoalesceFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "vals", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + argTypes := make([]cty.Type, len(args)) + for i, val := range args { + argTypes[i] = val.Type() + } + retType, _ := convert.UnifyUnsafe(argTypes) + if retType == cty.NilType { + return cty.NilType, errors.New("all arguments must have the same type") + } + return retType, nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + for _, argVal := range args { + // We already know this will succeed because of the checks in our Type func above + argVal, _ = convert.Convert(argVal, retType) + if !argVal.IsKnown() { + return cty.UnknownVal(retType), nil + } + if argVal.IsNull() { + continue + } + if retType == cty.String && argVal.RawEquals(cty.StringVal("")) { + continue + } + + return argVal, nil + } + return cty.NilVal, 
errors.New("no non-null, non-empty-string arguments") + }, +}) + +// IndexFunc constructs a function that finds the element index for a given value in a list. +var IndexFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.DynamicPseudoType, + }, + { + Name: "value", + Type: cty.DynamicPseudoType, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + if !(args[0].Type().IsListType() || args[0].Type().IsTupleType()) { + return cty.NilVal, errors.New("argument must be a list or tuple") + } + + if !args[0].IsKnown() { + return cty.UnknownVal(cty.Number), nil + } + + if args[0].LengthInt() == 0 { // Easy path + return cty.NilVal, errors.New("cannot search an empty list") + } + + for it := args[0].ElementIterator(); it.Next(); { + i, v := it.Element() + eq, err := stdlib.Equal(v, args[1]) + if err != nil { + return cty.NilVal, err + } + if !eq.IsKnown() { + return cty.UnknownVal(cty.Number), nil + } + if eq.True() { + return i, nil + } + } + return cty.NilVal, errors.New("item not found") + + }, +}) + +// LookupFunc constructs a function that performs dynamic lookups of map types. 
+var LookupFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "inputMap", + Type: cty.DynamicPseudoType, + AllowMarked: true, + }, + { + Name: "key", + Type: cty.String, + AllowMarked: true, + }, + }, + VarParam: &function.Parameter{ + Name: "default", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + AllowMarked: true, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + if len(args) < 1 || len(args) > 3 { + return cty.NilType, fmt.Errorf("lookup() takes two or three arguments, got %d", len(args)) + } + + ty := args[0].Type() + + switch { + case ty.IsObjectType(): + if !args[1].IsKnown() { + return cty.DynamicPseudoType, nil + } + + keyVal, _ := args[1].Unmark() + key := keyVal.AsString() + if ty.HasAttribute(key) { + return args[0].GetAttr(key).Type(), nil + } else if len(args) == 3 { + // if the key isn't found but a default is provided, + // return the default type + return args[2].Type(), nil + } + return cty.DynamicPseudoType, function.NewArgErrorf(0, "the given object has no attribute %q", key) + case ty.IsMapType(): + if len(args) == 3 { + _, err = convert.Convert(args[2], ty.ElementType()) + if err != nil { + return cty.NilType, function.NewArgErrorf(2, "the default value must have the same type as the map elements") + } + } + return ty.ElementType(), nil + default: + return cty.NilType, function.NewArgErrorf(0, "lookup() requires a map as the first argument") + } + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var defaultVal cty.Value + defaultValueSet := false + + if len(args) == 3 { + // intentionally leave default value marked + defaultVal = args[2] + defaultValueSet = true + } + + // keep track of marks from the collection and key + var markses []cty.ValueMarks + + // unmark collection, retain marks to reapply later + mapVar, mapMarks := args[0].Unmark() + markses = append(markses, mapMarks) + + // include marks on the key 
in the result + keyVal, keyMarks := args[1].Unmark() + if len(keyMarks) > 0 { + markses = append(markses, keyMarks) + } + lookupKey := keyVal.AsString() + + if !mapVar.IsKnown() { + return cty.UnknownVal(retType).WithMarks(markses...), nil + } + + if mapVar.Type().IsObjectType() { + if mapVar.Type().HasAttribute(lookupKey) { + return mapVar.GetAttr(lookupKey).WithMarks(markses...), nil + } + } else if mapVar.HasIndex(cty.StringVal(lookupKey)) == cty.True { + return mapVar.Index(cty.StringVal(lookupKey)).WithMarks(markses...), nil + } + + if defaultValueSet { + defaultVal, err = convert.Convert(defaultVal, retType) + if err != nil { + return cty.NilVal, err + } + return defaultVal.WithMarks(markses...), nil + } + + return cty.UnknownVal(cty.DynamicPseudoType), fmt.Errorf( + "lookup failed to find key %s", redactIfSensitive(lookupKey, keyMarks)) + }, +}) + +// MatchkeysFunc constructs a function that constructs a new list by taking a +// subset of elements from one list whose indexes match the corresponding +// indexes of values in another list. 
+var MatchkeysFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "values", + Type: cty.List(cty.DynamicPseudoType), + }, + { + Name: "keys", + Type: cty.List(cty.DynamicPseudoType), + }, + { + Name: "searchset", + Type: cty.List(cty.DynamicPseudoType), + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + ty, _ := convert.UnifyUnsafe([]cty.Type{args[1].Type(), args[2].Type()}) + if ty == cty.NilType { + return cty.NilType, errors.New("keys and searchset must be of the same type") + } + + // the return type is based on args[0] (values) + return args[0].Type(), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + if !args[0].IsKnown() { + return cty.UnknownVal(cty.List(retType.ElementType())), nil + } + + if args[0].LengthInt() != args[1].LengthInt() { + return cty.ListValEmpty(retType.ElementType()), errors.New("length of keys and values should be equal") + } + + output := make([]cty.Value, 0) + values := args[0] + + // Keys and searchset must be the same type. + // We can skip error checking here because we've already verified that + // they can be unified in the Type function + ty, _ := convert.UnifyUnsafe([]cty.Type{args[1].Type(), args[2].Type()}) + keys, _ := convert.Convert(args[1], ty) + searchset, _ := convert.Convert(args[2], ty) + + // if searchset is empty, return an empty list. 
+ if searchset.LengthInt() == 0 { + return cty.ListValEmpty(retType.ElementType()), nil + } + + if !values.IsWhollyKnown() || !keys.IsWhollyKnown() { + return cty.UnknownVal(retType), nil + } + + i := 0 + for it := keys.ElementIterator(); it.Next(); { + _, key := it.Element() + for iter := searchset.ElementIterator(); iter.Next(); { + _, search := iter.Element() + eq, err := stdlib.Equal(key, search) + if err != nil { + return cty.NilVal, err + } + if !eq.IsKnown() { + return cty.ListValEmpty(retType.ElementType()), nil + } + if eq.True() { + v := values.Index(cty.NumberIntVal(int64(i))) + output = append(output, v) + break + } + } + i++ + } + + // if we haven't matched any key, then output is an empty list. + if len(output) == 0 { + return cty.ListValEmpty(retType.ElementType()), nil + } + return cty.ListVal(output), nil + }, +}) + +// OneFunc returns either the first element of a one-element list, or null +// if given a zero-element list. +var OneFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.DynamicPseudoType, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + ty := args[0].Type() + switch { + case ty.IsListType() || ty.IsSetType(): + return ty.ElementType(), nil + case ty.IsTupleType(): + etys := ty.TupleElementTypes() + switch len(etys) { + case 0: + // No specific type information, so we'll ultimately return + // a null value of unknown type. + return cty.DynamicPseudoType, nil + case 1: + return etys[0], nil + } + } + return cty.NilType, function.NewArgErrorf(0, "must be a list, set, or tuple value with either zero or one elements") + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + val := args[0] + ty := val.Type() + + // Our parameter spec above doesn't set AllowUnknown or AllowNull, + // so we can assume our top-level collection is both known and non-null + // in here. 
+ + switch { + case ty.IsListType() || ty.IsSetType(): + lenVal := val.Length() + if !lenVal.IsKnown() { + return cty.UnknownVal(retType), nil + } + var l int + err := gocty.FromCtyValue(lenVal, &l) + if err != nil { + // It would be very strange to get here, because that would + // suggest that the length is either not a number or isn't + // an integer, which would suggest a bug in cty. + return cty.NilVal, fmt.Errorf("invalid collection length: %s", err) + } + switch l { + case 0: + return cty.NullVal(retType), nil + case 1: + var ret cty.Value + // We'll use an iterator here because that works for both lists + // and sets, whereas indexing directly would only work for lists. + // Since we've just checked the length, we should only actually + // run this loop body once. + for it := val.ElementIterator(); it.Next(); { + _, ret = it.Element() + } + return ret, nil + } + case ty.IsTupleType(): + etys := ty.TupleElementTypes() + switch len(etys) { + case 0: + return cty.NullVal(retType), nil + case 1: + ret := val.Index(cty.NumberIntVal(0)) + return ret, nil + } + } + return cty.NilVal, function.NewArgErrorf(0, "must be a list, set, or tuple value with either zero or one elements") + }, +}) + +// SumFunc constructs a function that returns the sum of all +// numbers provided in a list +var SumFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.DynamicPseudoType, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + + if !args[0].CanIterateElements() { + return cty.NilVal, function.NewArgErrorf(0, "cannot sum noniterable") + } + + if args[0].LengthInt() == 0 { // Easy path + return cty.NilVal, function.NewArgErrorf(0, "cannot sum an empty list") + } + + arg := args[0].AsValueSlice() + ty := args[0].Type() + + if !ty.IsListType() && !ty.IsSetType() && !ty.IsTupleType() { + return cty.NilVal, function.NewArgErrorf(0, fmt.Sprintf("argument 
must be list, set, or tuple. Received %s", ty.FriendlyName())) + } + + if !args[0].IsWhollyKnown() { + return cty.UnknownVal(cty.Number), nil + } + + // big.Float.Add can panic if the input values are opposing infinities, + // so we must catch that here in order to remain within + // the cty Function abstraction. + defer func() { + if r := recover(); r != nil { + if _, ok := r.(big.ErrNaN); ok { + ret = cty.NilVal + err = fmt.Errorf("can't compute sum of opposing infinities") + } else { + // not a panic we recognize + panic(r) + } + } + }() + + s := arg[0] + if s.IsNull() { + return cty.NilVal, function.NewArgErrorf(0, "argument must be list, set, or tuple of number values") + } + s, err = convert.Convert(s, cty.Number) + if err != nil { + return cty.NilVal, function.NewArgErrorf(0, "argument must be list, set, or tuple of number values") + } + for _, v := range arg[1:] { + if v.IsNull() { + return cty.NilVal, function.NewArgErrorf(0, "argument must be list, set, or tuple of number values") + } + v, err = convert.Convert(v, cty.Number) + if err != nil { + return cty.NilVal, function.NewArgErrorf(0, "argument must be list, set, or tuple of number values") + } + s = s.Add(v) + } + + return s, nil + }, +}) + +// TransposeFunc constructs a function that takes a map of lists of strings and +// swaps the keys and values to produce a new map of lists of strings. 
+var TransposeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "values", + Type: cty.Map(cty.List(cty.String)), + }, + }, + Type: function.StaticReturnType(cty.Map(cty.List(cty.String))), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + inputMap := args[0] + if !inputMap.IsWhollyKnown() { + return cty.UnknownVal(retType), nil + } + + outputMap := make(map[string]cty.Value) + tmpMap := make(map[string][]string) + + for it := inputMap.ElementIterator(); it.Next(); { + inKey, inVal := it.Element() + for iter := inVal.ElementIterator(); iter.Next(); { + _, val := iter.Element() + if !val.Type().Equals(cty.String) { + return cty.MapValEmpty(cty.List(cty.String)), errors.New("input must be a map of lists of strings") + } + + outKey := val.AsString() + if _, ok := tmpMap[outKey]; !ok { + tmpMap[outKey] = make([]string, 0) + } + outVal := tmpMap[outKey] + outVal = append(outVal, inKey.AsString()) + sort.Strings(outVal) + tmpMap[outKey] = outVal + } + } + + for outKey, outVal := range tmpMap { + values := make([]cty.Value, 0) + for _, v := range outVal { + values = append(values, cty.StringVal(v)) + } + outputMap[outKey] = cty.ListVal(values) + } + + if len(outputMap) == 0 { + return cty.MapValEmpty(cty.List(cty.String)), nil + } + + return cty.MapVal(outputMap), nil + }, +}) + +// ListFunc constructs a function that takes an arbitrary number of arguments +// and returns a list containing those values in the same order. +// +// This function is deprecated in Terraform v0.12 +var ListFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "vals", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + return cty.DynamicPseudoType, fmt.Errorf("the \"list\" function was deprecated in Terraform v0.12 and is no longer available; use tolist([ ... 
]) syntax to write a literal list") + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return cty.DynamicVal, fmt.Errorf("the \"list\" function was deprecated in Terraform v0.12 and is no longer available; use tolist([ ... ]) syntax to write a literal list") + }, +}) + +// MapFunc constructs a function that takes an even number of arguments and +// returns a map whose elements are constructed from consecutive pairs of arguments. +// +// This function is deprecated in Terraform v0.12 +var MapFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "vals", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + return cty.DynamicPseudoType, fmt.Errorf("the \"map\" function was deprecated in Terraform v0.12 and is no longer available; use tomap({ ... }) syntax to write a literal map") + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return cty.DynamicVal, fmt.Errorf("the \"map\" function was deprecated in Terraform v0.12 and is no longer available; use tomap({ ... }) syntax to write a literal map") + }, +}) + +// Length returns the number of elements in the given collection or number of +// Unicode characters in the given string. +func Length(collection cty.Value) (cty.Value, error) { + return LengthFunc.Call([]cty.Value{collection}) +} + +// AllTrue returns true if all elements of the list are true. If the list is empty, +// return true. +func AllTrue(collection cty.Value) (cty.Value, error) { + return AllTrueFunc.Call([]cty.Value{collection}) +} + +// AnyTrue returns true if any element of the list is true. If the list is empty, +// return false. 
+func AnyTrue(collection cty.Value) (cty.Value, error) {
+	return AnyTrueFunc.Call([]cty.Value{collection})
+}
+
+// Coalesce takes any number of arguments and returns the first one that isn't empty.
+func Coalesce(args ...cty.Value) (cty.Value, error) {
+	return CoalesceFunc.Call(args)
+}
+
+// Index finds the element index for a given value in a list.
+func Index(list, value cty.Value) (cty.Value, error) {
+	return IndexFunc.Call([]cty.Value{list, value})
+}
+
+// List takes any number of list arguments and returns a list containing those
+// values in the same order.
+func List(args ...cty.Value) (cty.Value, error) {
+	return ListFunc.Call(args)
+}
+
+// Lookup performs a dynamic lookup into a map.
+// There are two required arguments, map and key, plus an optional default,
+// which is a value to return if no key is found in map.
+func Lookup(args ...cty.Value) (cty.Value, error) {
+	return LookupFunc.Call(args)
+}
+
+// Map takes an even number of arguments and returns a map whose elements are constructed
+// from consecutive pairs of arguments.
+func Map(args ...cty.Value) (cty.Value, error) {
+	return MapFunc.Call(args)
+}
+
+// Matchkeys constructs a new list by taking a subset of elements from one list
+// whose indexes match the corresponding indexes of values in another list.
+func Matchkeys(values, keys, searchset cty.Value) (cty.Value, error) {
+	return MatchkeysFunc.Call([]cty.Value{values, keys, searchset})
+}
+
+// One returns either the first element of a one-element list, or null
+// if given a zero-element list.
+func One(list cty.Value) (cty.Value, error) {
+	return OneFunc.Call([]cty.Value{list})
+}
+
+// Sum adds numbers in a list, set, or tuple.
+func Sum(list cty.Value) (cty.Value, error) {
+	return SumFunc.Call([]cty.Value{list})
+}
+
+// Transpose takes a map of lists of strings and swaps the keys and values to
+func Transpose(values cty.Value) (cty.Value, error) { + return TransposeFunc.Call([]cty.Value{values}) +} diff --git a/internal/terraform/lang/funcs/collection_test.go b/internal/terraform/lang/funcs/collection_test.go new file mode 100644 index 00000000..c8303d7d --- /dev/null +++ b/internal/terraform/lang/funcs/collection_test.go @@ -0,0 +1,1837 @@ +package funcs + +import ( + "fmt" + "math" + "testing" + + "github.com/camptocamp/terraboard/internal/terraform/lang/marks" + "github.com/zclconf/go-cty/cty" +) + +func TestLength(t *testing.T) { + tests := []struct { + Value cty.Value + Want cty.Value + }{ + { + cty.ListValEmpty(cty.Number), + cty.NumberIntVal(0), + }, + { + cty.ListVal([]cty.Value{cty.True}), + cty.NumberIntVal(1), + }, + { + cty.ListVal([]cty.Value{cty.UnknownVal(cty.Bool)}), + cty.NumberIntVal(1), + }, + { + cty.SetValEmpty(cty.Number), + cty.NumberIntVal(0), + }, + { + cty.SetVal([]cty.Value{cty.True}), + cty.NumberIntVal(1), + }, + { + cty.MapValEmpty(cty.Bool), + cty.NumberIntVal(0), + }, + { + cty.MapVal(map[string]cty.Value{"hello": cty.True}), + cty.NumberIntVal(1), + }, + { + cty.EmptyTupleVal, + cty.NumberIntVal(0), + }, + { + cty.UnknownVal(cty.EmptyTuple), + cty.NumberIntVal(0), + }, + { + cty.TupleVal([]cty.Value{cty.True}), + cty.NumberIntVal(1), + }, + { + cty.EmptyObjectVal, + cty.NumberIntVal(0), + }, + { + cty.UnknownVal(cty.EmptyObject), + cty.NumberIntVal(0), + }, + { + cty.ObjectVal(map[string]cty.Value{"true": cty.True}), + cty.NumberIntVal(1), + }, + { + cty.UnknownVal(cty.List(cty.Bool)), + cty.UnknownVal(cty.Number), + }, + { + cty.DynamicVal, + cty.UnknownVal(cty.Number), + }, + { + cty.StringVal("hello"), + cty.NumberIntVal(5), + }, + { + cty.StringVal(""), + cty.NumberIntVal(0), + }, + { + cty.StringVal("1"), + cty.NumberIntVal(1), + }, + { + cty.StringVal("Живой Журнал"), + cty.NumberIntVal(12), + }, + { + // note that the dieresis here is intentionally a combining + // ligature. 
+ cty.StringVal("noël"), + cty.NumberIntVal(4), + }, + { + // The Es in this string has three combining acute accents. + // This tests something that NFC-normalization cannot collapse + // into a single precombined codepoint, since otherwise we might + // be cheating and relying on the single-codepoint forms. + cty.StringVal("wé́́é́́é́́!"), + cty.NumberIntVal(5), + }, + { + // Go's normalization forms don't handle this ligature, so we + // will produce the wrong result but this is now a compatibility + // constraint and so we'll test it. + cty.StringVal("baffle"), + cty.NumberIntVal(4), + }, + { + cty.StringVal("😸😾"), + cty.NumberIntVal(2), + }, + { + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.Number), + }, + { + cty.DynamicVal, + cty.UnknownVal(cty.Number), + }, + { // Marked collections return a marked length + cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("world"), + }).Mark("secret"), + cty.NumberIntVal(2).Mark("secret"), + }, + { // Marks on values in unmarked collections do not propagate + cty.ListVal([]cty.Value{ + cty.StringVal("hello").Mark("a"), + cty.StringVal("world").Mark("b"), + }), + cty.NumberIntVal(2), + }, + { // Marked strings return a marked length + cty.StringVal("hello world").Mark("secret"), + cty.NumberIntVal(11).Mark("secret"), + }, + { // Marked tuples return a marked length + cty.TupleVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("world"), + }).Mark("secret"), + cty.NumberIntVal(2).Mark("secret"), + }, + { // Marks on values in unmarked tuples do not propagate + cty.TupleVal([]cty.Value{ + cty.StringVal("hello").Mark("a"), + cty.StringVal("world").Mark("b"), + }), + cty.NumberIntVal(2), + }, + { // Marked objects return a marked length + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hello"), + "b": cty.StringVal("world"), + "c": cty.StringVal("nice to meet you"), + }).Mark("secret"), + cty.NumberIntVal(3).Mark("secret"), + }, + { // Marks on object attribute values do not propagate + 
cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hello").Mark("a"), + "b": cty.StringVal("world").Mark("b"), + "c": cty.StringVal("nice to meet you").Mark("c"), + }), + cty.NumberIntVal(3), + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("Length(%#v)", test.Value), func(t *testing.T) { + got, err := Length(test.Value) + + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestAllTrue(t *testing.T) { + tests := []struct { + Collection cty.Value + Want cty.Value + Err bool + }{ + { + cty.ListValEmpty(cty.Bool), + cty.True, + false, + }, + { + cty.ListVal([]cty.Value{cty.True}), + cty.True, + false, + }, + { + cty.ListVal([]cty.Value{cty.False}), + cty.False, + false, + }, + { + cty.ListVal([]cty.Value{cty.True, cty.False}), + cty.False, + false, + }, + { + cty.ListVal([]cty.Value{cty.False, cty.True}), + cty.False, + false, + }, + { + cty.ListVal([]cty.Value{cty.True, cty.NullVal(cty.Bool)}), + cty.False, + false, + }, + { + cty.ListVal([]cty.Value{cty.UnknownVal(cty.Bool)}), + cty.UnknownVal(cty.Bool), + false, + }, + { + cty.ListVal([]cty.Value{ + cty.UnknownVal(cty.Bool), + cty.UnknownVal(cty.Bool), + }), + cty.UnknownVal(cty.Bool), + false, + }, + { + cty.UnknownVal(cty.List(cty.Bool)), + cty.UnknownVal(cty.Bool), + false, + }, + { + cty.NullVal(cty.List(cty.Bool)), + cty.NilVal, + true, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("alltrue(%#v)", test.Collection), func(t *testing.T) { + got, err := AllTrue(test.Collection) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestAnyTrue(t *testing.T) { + tests := []struct { + Collection cty.Value + Want cty.Value + Err bool 
+ }{ + { + cty.ListValEmpty(cty.Bool), + cty.False, + false, + }, + { + cty.ListVal([]cty.Value{cty.True}), + cty.True, + false, + }, + { + cty.ListVal([]cty.Value{cty.False}), + cty.False, + false, + }, + { + cty.ListVal([]cty.Value{cty.True, cty.False}), + cty.True, + false, + }, + { + cty.ListVal([]cty.Value{cty.False, cty.True}), + cty.True, + false, + }, + { + cty.ListVal([]cty.Value{cty.NullVal(cty.Bool), cty.True}), + cty.True, + false, + }, + { + cty.ListVal([]cty.Value{cty.UnknownVal(cty.Bool)}), + cty.UnknownVal(cty.Bool), + false, + }, + { + cty.ListVal([]cty.Value{ + cty.UnknownVal(cty.Bool), + cty.False, + }), + cty.UnknownVal(cty.Bool), + false, + }, + { + cty.ListVal([]cty.Value{ + cty.UnknownVal(cty.Bool), + cty.True, + }), + cty.True, + false, + }, + { + cty.UnknownVal(cty.List(cty.Bool)), + cty.UnknownVal(cty.Bool), + false, + }, + { + cty.NullVal(cty.List(cty.Bool)), + cty.NilVal, + true, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("anytrue(%#v)", test.Collection), func(t *testing.T) { + got, err := AnyTrue(test.Collection) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestCoalesce(t *testing.T) { + tests := []struct { + Values []cty.Value + Want cty.Value + Err bool + }{ + { + []cty.Value{cty.StringVal("first"), cty.StringVal("second"), cty.StringVal("third")}, + cty.StringVal("first"), + false, + }, + { + []cty.Value{cty.StringVal(""), cty.StringVal("second"), cty.StringVal("third")}, + cty.StringVal("second"), + false, + }, + { + []cty.Value{cty.StringVal(""), cty.StringVal("")}, + cty.NilVal, + true, + }, + { + []cty.Value{cty.True}, + cty.True, + false, + }, + { + []cty.Value{cty.NullVal(cty.Bool), cty.True}, + cty.True, + false, + }, + { + []cty.Value{cty.NullVal(cty.Bool), cty.False}, + 
cty.False, + false, + }, + { + []cty.Value{cty.NullVal(cty.Bool), cty.False, cty.StringVal("hello")}, + cty.StringVal("false"), + false, + }, + { + []cty.Value{cty.True, cty.UnknownVal(cty.Bool)}, + cty.True, + false, + }, + { + []cty.Value{cty.UnknownVal(cty.Bool), cty.True}, + cty.UnknownVal(cty.Bool), + false, + }, + { + []cty.Value{cty.UnknownVal(cty.Bool), cty.StringVal("hello")}, + cty.UnknownVal(cty.String), + false, + }, + { + []cty.Value{cty.DynamicVal, cty.True}, + cty.UnknownVal(cty.Bool), + false, + }, + { + []cty.Value{cty.DynamicVal}, + cty.DynamicVal, + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("Coalesce(%#v...)", test.Values), func(t *testing.T) { + got, err := Coalesce(test.Values...) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestIndex(t *testing.T) { + tests := []struct { + List cty.Value + Value cty.Value + Want cty.Value + Err bool + }{ + { + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }), + cty.StringVal("a"), + cty.NumberIntVal(0), + false, + }, + { + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.UnknownVal(cty.String), + }), + cty.StringVal("a"), + cty.NumberIntVal(0), + false, + }, + { + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }), + cty.StringVal("b"), + cty.NumberIntVal(1), + false, + }, + { + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }), + cty.StringVal("z"), + cty.NilVal, + true, + }, + { + cty.ListVal([]cty.Value{ + cty.StringVal("1"), + cty.StringVal("2"), + cty.StringVal("3"), + }), + cty.NumberIntVal(1), + cty.NumberIntVal(0), + true, + }, + { + cty.ListVal([]cty.Value{ + cty.NumberIntVal(1), + 
cty.NumberIntVal(2), + cty.NumberIntVal(3), + }), + cty.NumberIntVal(2), + cty.NumberIntVal(1), + false, + }, + { + cty.ListVal([]cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(2), + cty.NumberIntVal(3), + }), + cty.NumberIntVal(4), + cty.NilVal, + true, + }, + { + cty.ListVal([]cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(2), + cty.NumberIntVal(3), + }), + cty.StringVal("1"), + cty.NumberIntVal(0), + true, + }, + { + cty.TupleVal([]cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(2), + cty.NumberIntVal(3), + }), + cty.NumberIntVal(1), + cty.NumberIntVal(0), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("index(%#v, %#v)", test.List, test.Value), func(t *testing.T) { + got, err := Index(test.List, test.Value) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestLookup(t *testing.T) { + simpleMap := cty.MapVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }) + intsMap := cty.MapVal(map[string]cty.Value{ + "foo": cty.NumberIntVal(42), + }) + mapOfLists := cty.MapVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.StringVal("bar"), + cty.StringVal("baz"), + }), + }) + mapOfMaps := cty.MapVal(map[string]cty.Value{ + "foo": cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("bar"), + }), + "baz": cty.MapVal(map[string]cty.Value{ + "b": cty.StringVal("bat"), + }), + }) + mapOfTuples := cty.MapVal(map[string]cty.Value{ + "foo": cty.TupleVal([]cty.Value{cty.StringVal("bar")}), + "baz": cty.TupleVal([]cty.Value{cty.StringVal("bat")}), + }) + objectOfMaps := cty.ObjectVal(map[string]cty.Value{ + "foo": cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("bar"), + }), + "baz": cty.MapVal(map[string]cty.Value{ + "b": cty.StringVal("bat"), + }), + }) + mapWithUnknowns := 
cty.MapVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + "baz": cty.UnknownVal(cty.String), + }) + mapWithObjects := cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + "baz": cty.NumberIntVal(42), + }) + + tests := []struct { + Values []cty.Value + Want cty.Value + Err bool + }{ + { + []cty.Value{ + simpleMap, + cty.StringVal("foo"), + }, + cty.StringVal("bar"), + false, + }, + { + []cty.Value{ + mapWithObjects, + cty.StringVal("foo"), + }, + cty.StringVal("bar"), + false, + }, + { + []cty.Value{ + intsMap, + cty.StringVal("foo"), + }, + cty.NumberIntVal(42), + false, + }, + { + []cty.Value{ + mapOfMaps, + cty.StringVal("foo"), + }, + cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("bar"), + }), + false, + }, + { + []cty.Value{ + objectOfMaps, + cty.StringVal("foo"), + }, + cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("bar"), + }), + false, + }, + { + []cty.Value{ + mapOfTuples, + cty.StringVal("foo"), + }, + cty.TupleVal([]cty.Value{cty.StringVal("bar")}), + false, + }, + { // Invalid key + []cty.Value{ + simpleMap, + cty.StringVal("bar"), + }, + cty.NilVal, + true, + }, + { // Invalid key + []cty.Value{ + mapWithObjects, + cty.StringVal("bar"), + }, + cty.NilVal, + true, + }, + { // Supplied default with valid key + []cty.Value{ + simpleMap, + cty.StringVal("foo"), + cty.StringVal(""), + }, + cty.StringVal("bar"), + false, + }, + { // Supplied default with valid (int) key + []cty.Value{ + simpleMap, + cty.StringVal("foo"), + cty.NumberIntVal(-1), + }, + cty.StringVal("bar"), + false, + }, + { // Supplied default with valid (int) key + []cty.Value{ + simpleMap, + cty.StringVal("foobar"), + cty.NumberIntVal(-1), + }, + cty.StringVal("-1"), + false, + }, + { // Supplied default with valid key + []cty.Value{ + mapWithObjects, + cty.StringVal("foobar"), + cty.StringVal(""), + }, + cty.StringVal(""), + false, + }, + { // Supplied default with invalid key + []cty.Value{ + simpleMap, + cty.StringVal("baz"), + cty.StringVal(""), + 
}, + cty.StringVal(""), + false, + }, + { // Supplied default with type mismatch: expects a map return + []cty.Value{ + mapOfMaps, + cty.StringVal("foo"), + cty.StringVal(""), + }, + cty.NilVal, + true, + }, + { // Supplied non-empty default with invalid key + []cty.Value{ + simpleMap, + cty.StringVal("bar"), + cty.StringVal("xyz"), + }, + cty.StringVal("xyz"), + false, + }, + { // too many args + []cty.Value{ + simpleMap, + cty.StringVal("foo"), + cty.StringVal("bar"), + cty.StringVal("baz"), + }, + cty.NilVal, + true, + }, + { // cannot search a map of lists + []cty.Value{ + mapOfLists, + cty.StringVal("baz"), + }, + cty.NilVal, + true, + }, + { + []cty.Value{ + mapWithUnknowns, + cty.StringVal("baz"), + }, + cty.UnknownVal(cty.String), + false, + }, + { + []cty.Value{ + mapWithUnknowns, + cty.StringVal("foo"), + }, + cty.StringVal("bar"), + false, + }, + { + []cty.Value{ + simpleMap, + cty.UnknownVal(cty.String), + }, + cty.UnknownVal(cty.String), + false, + }, + { + []cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("a"), + "bar": cty.StringVal("b"), + }), + cty.UnknownVal(cty.String), + }, + cty.DynamicVal, // if the key is unknown then we don't know which object attribute and thus can't know the type + false, + }, + { // successful marked collection lookup returns marked value + []cty.Value{ + cty.MapVal(map[string]cty.Value{ + "boop": cty.StringVal("beep"), + }).Mark("a"), + cty.StringVal("boop"), + cty.StringVal("nope"), + }, + cty.StringVal("beep").Mark("a"), + false, + }, + { // apply collection marks to unknown return vaue + []cty.Value{ + cty.MapVal(map[string]cty.Value{ + "boop": cty.StringVal("beep"), + "frob": cty.UnknownVal(cty.String), + }).Mark("a"), + cty.StringVal("frob"), + cty.StringVal("nope"), + }, + cty.UnknownVal(cty.String).Mark("a"), + false, + }, + { // propagate collection marks to default when returning + []cty.Value{ + cty.MapVal(map[string]cty.Value{ + "boop": cty.StringVal("beep"), + }).Mark("a"), + 
cty.StringVal("frob"), + cty.StringVal("nope").Mark("b"), + }, + cty.StringVal("nope").WithMarks(cty.NewValueMarks("a", "b")), + false, + }, + { // on unmarked collection, return only marks from found value + []cty.Value{ + cty.MapVal(map[string]cty.Value{ + "boop": cty.StringVal("beep").Mark("a"), + "frob": cty.StringVal("honk").Mark("b"), + }), + cty.StringVal("frob"), + cty.StringVal("nope").Mark("c"), + }, + cty.StringVal("honk").Mark("b"), + false, + }, + { // on unmarked collection, return default exactly on missing + []cty.Value{ + cty.MapVal(map[string]cty.Value{ + "boop": cty.StringVal("beep").Mark("a"), + "frob": cty.StringVal("honk").Mark("b"), + }), + cty.StringVal("squish"), + cty.StringVal("nope").Mark("c"), + }, + cty.StringVal("nope").Mark("c"), + false, + }, + { // retain marks on default if converted + []cty.Value{ + cty.MapVal(map[string]cty.Value{ + "boop": cty.StringVal("beep").Mark("a"), + "frob": cty.StringVal("honk").Mark("b"), + }), + cty.StringVal("squish"), + cty.NumberIntVal(5).Mark("c"), + }, + cty.StringVal("5").Mark("c"), + false, + }, + { // propagate marks from key + []cty.Value{ + cty.MapVal(map[string]cty.Value{ + "boop": cty.StringVal("beep"), + "frob": cty.StringVal("honk"), + }), + cty.StringVal("boop").Mark("a"), + cty.StringVal("nope"), + }, + cty.StringVal("beep").Mark("a"), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("lookup(%#v)", test.Values), func(t *testing.T) { + got, err := Lookup(test.Values...) 
+ + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestLookup_error(t *testing.T) { + simpleMap := cty.MapVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }) + + tests := map[string]struct { + Values []cty.Value + WantErr string + }{ + "failed to find non-sensitive key": { + []cty.Value{ + simpleMap, + cty.StringVal("boop"), + }, + `lookup failed to find key "boop"`, + }, + "failed to find sensitive key": { + []cty.Value{ + simpleMap, + cty.StringVal("boop").Mark(marks.Sensitive), + }, + "lookup failed to find key (sensitive value)", + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + _, err := Lookup(test.Values...) + + if err == nil { + t.Fatal("succeeded; want error") + } + + if err.Error() != test.WantErr { + t.Errorf("wrong error\ngot: %#v\nwant: %#v", err, test.WantErr) + } + }) + } +} + +func TestMatchkeys(t *testing.T) { + tests := []struct { + Keys cty.Value + Values cty.Value + Searchset cty.Value + Want cty.Value + Err bool + }{ + { // normal usage + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("ref1"), + cty.StringVal("ref2"), + cty.StringVal("ref3"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("ref1"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + }), + false, + }, + { // normal usage 2, check the order + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("ref1"), + cty.StringVal("ref2"), + cty.StringVal("ref3"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("ref2"), + cty.StringVal("ref1"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + }), + false, + }, + { // 
no matches + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("ref1"), + cty.StringVal("ref2"), + cty.StringVal("ref3"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("ref4"), + }), + cty.ListValEmpty(cty.String), + false, + }, + { // no matches 2 + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("ref1"), + cty.StringVal("ref2"), + cty.StringVal("ref3"), + }), + cty.ListValEmpty(cty.String), + cty.ListValEmpty(cty.String), + false, + }, + { // zero case + cty.ListValEmpty(cty.String), + cty.ListValEmpty(cty.String), + cty.ListVal([]cty.Value{cty.StringVal("nope")}), + cty.ListValEmpty(cty.String), + false, + }, + { // complex values + cty.ListVal([]cty.Value{ + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("a"), + }), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + }), + cty.ListVal([]cty.Value{ + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("a"), + }), + }), + false, + }, + { // unknowns + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.UnknownVal(cty.String), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("ref1"), + cty.StringVal("ref2"), + cty.UnknownVal(cty.String), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("ref1"), + }), + cty.UnknownVal(cty.List(cty.String)), + false, + }, + { // different types that can be unified + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + }), + cty.ListVal([]cty.Value{ + cty.NumberIntVal(1), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + }), + cty.ListValEmpty(cty.String), + false, + }, + { // complex values: values is a different type from keys and searchset + cty.ListVal([]cty.Value{ + cty.MapVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + cty.MapVal(map[string]cty.Value{ + "foo": 
cty.StringVal("baz"), + }), + cty.MapVal(map[string]cty.Value{ + "foo": cty.StringVal("beep"), + }), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("c"), + }), + cty.ListVal([]cty.Value{ + cty.MapVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + cty.MapVal(map[string]cty.Value{ + "foo": cty.StringVal("beep"), + }), + }), + false, + }, + // errors + { // different types + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + }), + cty.ListVal([]cty.Value{ + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + }), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + }), + cty.NilVal, + true, + }, + { // lists of different length + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + }), + cty.NilVal, + true, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("matchkeys(%#v, %#v, %#v)", test.Keys, test.Values, test.Searchset), func(t *testing.T) { + got, err := Matchkeys(test.Keys, test.Values, test.Searchset) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestOne(t *testing.T) { + tests := []struct { + List cty.Value + Want cty.Value + Err string + }{ + { + cty.ListVal([]cty.Value{ + cty.NumberIntVal(1), + }), + cty.NumberIntVal(1), + "", + }, + { + cty.ListValEmpty(cty.Number), + cty.NullVal(cty.Number), + "", + }, + { + cty.ListVal([]cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(2), + cty.NumberIntVal(3), + }), + cty.NilVal, + "must be a list, set, or tuple value with either zero or one elements", + }, + { + 
cty.ListVal([]cty.Value{ + cty.UnknownVal(cty.Number), + }), + cty.UnknownVal(cty.Number), + "", + }, + { + cty.ListVal([]cty.Value{ + cty.UnknownVal(cty.Number), + cty.UnknownVal(cty.Number), + }), + cty.NilVal, + "must be a list, set, or tuple value with either zero or one elements", + }, + { + cty.UnknownVal(cty.List(cty.String)), + cty.UnknownVal(cty.String), + "", + }, + { + cty.NullVal(cty.List(cty.String)), + cty.NilVal, + "argument must not be null", + }, + { + cty.ListVal([]cty.Value{ + cty.NumberIntVal(1), + }).Mark("boop"), + cty.NumberIntVal(1).Mark("boop"), + "", + }, + { + cty.ListValEmpty(cty.Bool).Mark("boop"), + cty.NullVal(cty.Bool).Mark("boop"), + "", + }, + { + cty.ListVal([]cty.Value{ + cty.NumberIntVal(1).Mark("boop"), + }), + cty.NumberIntVal(1).Mark("boop"), + "", + }, + + { + cty.SetVal([]cty.Value{ + cty.NumberIntVal(1), + }), + cty.NumberIntVal(1), + "", + }, + { + cty.SetValEmpty(cty.Number), + cty.NullVal(cty.Number), + "", + }, + { + cty.SetVal([]cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(2), + cty.NumberIntVal(3), + }), + cty.NilVal, + "must be a list, set, or tuple value with either zero or one elements", + }, + { + cty.SetVal([]cty.Value{ + cty.UnknownVal(cty.Number), + }), + cty.UnknownVal(cty.Number), + "", + }, + { + cty.SetVal([]cty.Value{ + cty.UnknownVal(cty.Number), + cty.UnknownVal(cty.Number), + }), + // The above would be valid if those two unknown values were + // equal known values, so this returns unknown rather than failing. 
+ cty.UnknownVal(cty.Number), + "", + }, + { + cty.UnknownVal(cty.Set(cty.String)), + cty.UnknownVal(cty.String), + "", + }, + { + cty.NullVal(cty.Set(cty.String)), + cty.NilVal, + "argument must not be null", + }, + { + cty.SetVal([]cty.Value{ + cty.NumberIntVal(1), + }).Mark("boop"), + cty.NumberIntVal(1).Mark("boop"), + "", + }, + { + cty.SetValEmpty(cty.Bool).Mark("boop"), + cty.NullVal(cty.Bool).Mark("boop"), + "", + }, + { + cty.SetVal([]cty.Value{ + cty.NumberIntVal(1).Mark("boop"), + }), + cty.NumberIntVal(1).Mark("boop"), + "", + }, + + { + cty.TupleVal([]cty.Value{ + cty.NumberIntVal(1), + }), + cty.NumberIntVal(1), + "", + }, + { + cty.EmptyTupleVal, + cty.NullVal(cty.DynamicPseudoType), + "", + }, + { + cty.TupleVal([]cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(2), + cty.NumberIntVal(3), + }), + cty.NilVal, + "must be a list, set, or tuple value with either zero or one elements", + }, + { + cty.TupleVal([]cty.Value{ + cty.UnknownVal(cty.Number), + }), + cty.UnknownVal(cty.Number), + "", + }, + { + cty.TupleVal([]cty.Value{ + cty.UnknownVal(cty.Number), + cty.UnknownVal(cty.Number), + }), + cty.NilVal, + "must be a list, set, or tuple value with either zero or one elements", + }, + { + cty.UnknownVal(cty.EmptyTuple), + // Could actually return null here, but don't for consistency with unknown lists + cty.UnknownVal(cty.DynamicPseudoType), + "", + }, + { + cty.UnknownVal(cty.Tuple([]cty.Type{cty.Bool})), + cty.UnknownVal(cty.Bool), + "", + }, + { + cty.UnknownVal(cty.Tuple([]cty.Type{cty.Bool, cty.Number})), + cty.NilVal, + "must be a list, set, or tuple value with either zero or one elements", + }, + { + cty.NullVal(cty.EmptyTuple), + cty.NilVal, + "argument must not be null", + }, + { + cty.NullVal(cty.Tuple([]cty.Type{cty.Bool})), + cty.NilVal, + "argument must not be null", + }, + { + cty.NullVal(cty.Tuple([]cty.Type{cty.Bool, cty.Number})), + cty.NilVal, + "argument must not be null", + }, + { + cty.TupleVal([]cty.Value{ + 
cty.NumberIntVal(1), + }).Mark("boop"), + cty.NumberIntVal(1).Mark("boop"), + "", + }, + { + cty.EmptyTupleVal.Mark("boop"), + cty.NullVal(cty.DynamicPseudoType).Mark("boop"), + "", + }, + { + cty.TupleVal([]cty.Value{ + cty.NumberIntVal(1).Mark("boop"), + }), + cty.NumberIntVal(1).Mark("boop"), + "", + }, + + { + cty.DynamicVal, + cty.DynamicVal, + "", + }, + { + cty.NullVal(cty.DynamicPseudoType), + cty.NilVal, + "argument must not be null", + }, + { + cty.MapValEmpty(cty.String), + cty.NilVal, + "must be a list, set, or tuple value with either zero or one elements", + }, + { + cty.EmptyObjectVal, + cty.NilVal, + "must be a list, set, or tuple value with either zero or one elements", + }, + { + cty.True, + cty.NilVal, + "must be a list, set, or tuple value with either zero or one elements", + }, + { + cty.UnknownVal(cty.Bool), + cty.NilVal, + "must be a list, set, or tuple value with either zero or one elements", + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("one(%#v)", test.List), func(t *testing.T) { + got, err := One(test.List) + + if test.Err != "" { + if err == nil { + t.Fatal("succeeded; want error") + } else if got, want := err.Error(), test.Err; got != want { + t.Fatalf("wrong error\n got: %s\nwant: %s", got, want) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !test.Want.RawEquals(got) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestSum(t *testing.T) { + tests := []struct { + List cty.Value + Want cty.Value + Err string + }{ + { + cty.ListVal([]cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(2), + cty.NumberIntVal(3), + }), + cty.NumberIntVal(6), + "", + }, + { + cty.ListVal([]cty.Value{ + cty.NumberIntVal(1476), + cty.NumberIntVal(2093), + cty.NumberIntVal(2092495), + cty.NumberIntVal(64589234), + cty.NumberIntVal(234), + }), + cty.NumberIntVal(66685532), + "", + }, + { + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + 
cty.StringVal("c"), + }), + cty.UnknownVal(cty.String), + "argument must be list, set, or tuple of number values", + }, + { + cty.ListVal([]cty.Value{ + cty.NumberIntVal(10), + cty.NumberIntVal(-19), + cty.NumberIntVal(5), + }), + cty.NumberIntVal(-4), + "", + }, + { + cty.ListVal([]cty.Value{ + cty.NumberFloatVal(10.2), + cty.NumberFloatVal(19.4), + cty.NumberFloatVal(5.7), + }), + cty.NumberFloatVal(35.3), + "", + }, + { + cty.ListVal([]cty.Value{ + cty.NumberFloatVal(-10.2), + cty.NumberFloatVal(-19.4), + cty.NumberFloatVal(-5.7), + }), + cty.NumberFloatVal(-35.3), + "", + }, + { + cty.ListVal([]cty.Value{cty.NullVal(cty.Number)}), + cty.NilVal, + "argument must be list, set, or tuple of number values", + }, + { + cty.ListVal([]cty.Value{ + cty.NumberIntVal(5), + cty.NullVal(cty.Number), + }), + cty.NilVal, + "argument must be list, set, or tuple of number values", + }, + { + cty.SetVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }), + cty.UnknownVal(cty.String), + "argument must be list, set, or tuple of number values", + }, + { + cty.SetVal([]cty.Value{ + cty.NumberIntVal(10), + cty.NumberIntVal(-19), + cty.NumberIntVal(5), + }), + cty.NumberIntVal(-4), + "", + }, + { + cty.SetVal([]cty.Value{ + cty.NumberIntVal(10), + cty.NumberIntVal(25), + cty.NumberIntVal(30), + }), + cty.NumberIntVal(65), + "", + }, + { + cty.SetVal([]cty.Value{ + cty.NumberFloatVal(2340.8), + cty.NumberFloatVal(10.2), + cty.NumberFloatVal(3), + }), + cty.NumberFloatVal(2354), + "", + }, + { + cty.SetVal([]cty.Value{ + cty.NumberFloatVal(2), + }), + cty.NumberFloatVal(2), + "", + }, + { + cty.SetVal([]cty.Value{ + cty.NumberFloatVal(-2), + cty.NumberFloatVal(-50), + cty.NumberFloatVal(-20), + cty.NumberFloatVal(-123), + cty.NumberFloatVal(-4), + }), + cty.NumberFloatVal(-199), + "", + }, + { + cty.TupleVal([]cty.Value{ + cty.NumberIntVal(12), + cty.StringVal("a"), + cty.NumberIntVal(38), + }), + cty.UnknownVal(cty.String), + "argument must be list, 
set, or tuple of number values", + }, + { + cty.NumberIntVal(12), + cty.NilVal, + "cannot sum noniterable", + }, + { + cty.ListValEmpty(cty.Number), + cty.NilVal, + "cannot sum an empty list", + }, + { + cty.MapVal(map[string]cty.Value{"hello": cty.True}), + cty.NilVal, + "argument must be list, set, or tuple. Received map of bool", + }, + { + cty.UnknownVal(cty.Number), + cty.UnknownVal(cty.Number), + "", + }, + { + cty.UnknownVal(cty.List(cty.Number)), + cty.UnknownVal(cty.Number), + "", + }, + { // known list containing unknown values + cty.ListVal([]cty.Value{cty.UnknownVal(cty.Number)}), + cty.UnknownVal(cty.Number), + "", + }, + { // numbers too large to represent as float64 + cty.ListVal([]cty.Value{ + cty.MustParseNumberVal("1e+500"), + cty.MustParseNumberVal("1e+500"), + }), + cty.MustParseNumberVal("2e+500"), + "", + }, + { // edge case we have a special error handler for + cty.ListVal([]cty.Value{ + cty.NumberFloatVal(math.Inf(1)), + cty.NumberFloatVal(math.Inf(-1)), + }), + cty.NilVal, + "can't compute sum of opposing infinities", + }, + { + cty.ListVal([]cty.Value{ + cty.StringVal("1"), + cty.StringVal("2"), + cty.StringVal("3"), + }), + cty.NumberIntVal(6), + "", + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("sum(%#v)", test.List), func(t *testing.T) { + got, err := Sum(test.List) + + if test.Err != "" { + if err == nil { + t.Fatal("succeeded; want error") + } else if got, want := err.Error(), test.Err; got != want { + t.Fatalf("wrong error\n got: %s\nwant: %s", got, want) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestTranspose(t *testing.T) { + tests := []struct { + Values cty.Value + Want cty.Value + Err bool + }{ + { + cty.MapVal(map[string]cty.Value{ + "key1": cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + }), + "key2": cty.ListVal([]cty.Value{ + 
cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }), + "key3": cty.ListVal([]cty.Value{ + cty.StringVal("c"), + }), + "key4": cty.ListValEmpty(cty.String), + }), + cty.MapVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.StringVal("key1"), + cty.StringVal("key2"), + }), + "b": cty.ListVal([]cty.Value{ + cty.StringVal("key1"), + cty.StringVal("key2"), + }), + "c": cty.ListVal([]cty.Value{ + cty.StringVal("key2"), + cty.StringVal("key3"), + }), + }), + false, + }, + { // map - unknown value + cty.MapVal(map[string]cty.Value{ + "key1": cty.UnknownVal(cty.List(cty.String)), + }), + cty.UnknownVal(cty.Map(cty.List(cty.String))), + false, + }, + { // bad map - empty value + cty.MapVal(map[string]cty.Value{ + "key1": cty.ListValEmpty(cty.String), + }), + cty.MapValEmpty(cty.List(cty.String)), + false, + }, + { // bad map - value not a list + cty.MapVal(map[string]cty.Value{ + "key1": cty.StringVal("a"), + }), + cty.NilVal, + true, + }, + { // marks (deep or shallow) on any elements will propegate to the entire return value + cty.MapVal(map[string]cty.Value{ + "key1": cty.ListVal([]cty.Value{ + cty.StringVal("a").Mark("beep"), // mark on the inner list element + cty.StringVal("b"), + }), + "key2": cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }).Mark("boop"), // mark on the map element + "key3": cty.ListVal([]cty.Value{ + cty.StringVal("c"), + }), + "key4": cty.ListValEmpty(cty.String), + }), + cty.MapVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.StringVal("key1"), + cty.StringVal("key2"), + }), + "b": cty.ListVal([]cty.Value{ + cty.StringVal("key1"), + cty.StringVal("key2"), + }), + "c": cty.ListVal([]cty.Value{ + cty.StringVal("key2"), + cty.StringVal("key3")}), + }).WithMarks(cty.NewValueMarks("beep", "boop")), + false, + }, + { // Marks on the input value will be applied to the return value + cty.MapVal(map[string]cty.Value{ + "key1": cty.ListVal([]cty.Value{ + 
cty.StringVal("a"), + cty.StringVal("b"), + }), + "key2": cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }), + "key3": cty.ListVal([]cty.Value{ + cty.StringVal("c"), + }), + }).Mark("beep"), // mark on the entire input value + cty.MapVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.StringVal("key1"), + cty.StringVal("key2"), + }), + "b": cty.ListVal([]cty.Value{ + cty.StringVal("key1"), + cty.StringVal("key2"), + }), + "c": cty.ListVal([]cty.Value{ + cty.StringVal("key2"), + cty.StringVal("key3"), + }), + }).Mark("beep"), + false, + }, + { // Marks on the entire input value AND inner elements (deep or shallow) ALL apply to the return + cty.MapVal(map[string]cty.Value{ + "key1": cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + }).Mark("beep"), // mark on the map element + "key2": cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }), + "key3": cty.ListVal([]cty.Value{ + cty.StringVal("c").Mark("boop"), // mark on the inner list element + }), + }).Mark("bloop"), // mark on the entire input value + cty.MapVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.StringVal("key1"), + cty.StringVal("key2"), + }), + "b": cty.ListVal([]cty.Value{ + cty.StringVal("key1"), + cty.StringVal("key2"), + }), + "c": cty.ListVal([]cty.Value{ + cty.StringVal("key2"), + cty.StringVal("key3"), + }), + }).WithMarks(cty.NewValueMarks("beep", "boop", "bloop")), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("transpose(%#v)", test.Values), func(t *testing.T) { + got, err := Transpose(test.Values) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} diff --git a/internal/terraform/lang/funcs/conversion.go 
b/internal/terraform/lang/funcs/conversion.go new file mode 100644 index 00000000..ce81291c --- /dev/null +++ b/internal/terraform/lang/funcs/conversion.go @@ -0,0 +1,121 @@ +package funcs + +import ( + "strconv" + + "github.com/camptocamp/terraboard/internal/terraform/lang/marks" + "github.com/camptocamp/terraboard/internal/terraform/lang/types" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" +) + +// MakeToFunc constructs a "to..." function, like "tostring", which converts +// its argument to a specific type or type kind. +// +// The given type wantTy can be any type constraint that cty's "convert" package +// would accept. In particular, this means that you can pass +// cty.List(cty.DynamicPseudoType) to mean "list of any single type", which +// will then cause cty to attempt to unify all of the element types when given +// a tuple. +func MakeToFunc(wantTy cty.Type) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "v", + // We use DynamicPseudoType rather than wantTy here so that + // all values will pass through the function API verbatim and + // we can handle the conversion logic within the Type and + // Impl functions. This allows us to customize the error + // messages to be more appropriate for an explicit type + // conversion, whereas the cty function system produces + // messages aimed at _implicit_ type conversions. + Type: cty.DynamicPseudoType, + AllowNull: true, + AllowMarked: true, + AllowDynamicType: true, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + gotTy := args[0].Type() + if gotTy.Equals(wantTy) { + return wantTy, nil + } + conv := convert.GetConversionUnsafe(args[0].Type(), wantTy) + if conv == nil { + // We'll use some specialized errors for some trickier cases, + // but most we can handle in a simple way. 
+ switch { + case gotTy.IsTupleType() && wantTy.IsTupleType(): + return cty.NilType, function.NewArgErrorf(0, "incompatible tuple type for conversion: %s", convert.MismatchMessage(gotTy, wantTy)) + case gotTy.IsObjectType() && wantTy.IsObjectType(): + return cty.NilType, function.NewArgErrorf(0, "incompatible object type for conversion: %s", convert.MismatchMessage(gotTy, wantTy)) + default: + return cty.NilType, function.NewArgErrorf(0, "cannot convert %s to %s", gotTy.FriendlyName(), wantTy.FriendlyNameForConstraint()) + } + } + // If a conversion is available then everything is fine. + return wantTy, nil + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + // We didn't set "AllowUnknown" on our argument, so it is guaranteed + // to be known here but may still be null. + ret, err := convert.Convert(args[0], retType) + if err != nil { + val, _ := args[0].UnmarkDeep() + // Because we used GetConversionUnsafe above, conversion can + // still potentially fail in here. For example, if the user + // asks to convert the string "a" to bool then we'll + // optimistically permit it during type checking but fail here + // once we note that the value isn't either "true" or "false". + gotTy := val.Type() + switch { + case marks.Contains(args[0], marks.Sensitive): + // Generic message so we won't inadvertently disclose + // information about sensitive values. 
+ return cty.NilVal, function.NewArgErrorf(0, "cannot convert this sensitive %s to %s", gotTy.FriendlyName(), wantTy.FriendlyNameForConstraint()) + + case gotTy == cty.String && wantTy == cty.Bool: + what := "string" + if !val.IsNull() { + what = strconv.Quote(val.AsString()) + } + return cty.NilVal, function.NewArgErrorf(0, `cannot convert %s to bool; only the strings "true" or "false" are allowed`, what) + case gotTy == cty.String && wantTy == cty.Number: + what := "string" + if !val.IsNull() { + what = strconv.Quote(val.AsString()) + } + return cty.NilVal, function.NewArgErrorf(0, `cannot convert %s to number; given string must be a decimal representation of a number`, what) + default: + return cty.NilVal, function.NewArgErrorf(0, "cannot convert %s to %s", gotTy.FriendlyName(), wantTy.FriendlyNameForConstraint()) + } + } + return ret, nil + }, + }) +} + +// TypeFunc returns an encapsulated value containing its argument's type. This +// value is marked to allow us to limit the use of this function at the moment +// to only a few supported use cases. 
+var TypeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "value", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + AllowUnknown: true, + AllowNull: true, + }, + }, + Type: function.StaticReturnType(types.TypeType), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + givenType := args[0].Type() + return cty.CapsuleVal(types.TypeType, &givenType).Mark(marks.TypeType), nil + }, +}) + +func Type(input []cty.Value) (cty.Value, error) { + return TypeFunc.Call(input) +} diff --git a/internal/terraform/lang/funcs/conversion_test.go b/internal/terraform/lang/funcs/conversion_test.go new file mode 100644 index 00000000..bb13a5c9 --- /dev/null +++ b/internal/terraform/lang/funcs/conversion_test.go @@ -0,0 +1,202 @@ +package funcs + +import ( + "fmt" + "testing" + + "github.com/camptocamp/terraboard/internal/terraform/lang/marks" + "github.com/zclconf/go-cty/cty" +) + +func TestTo(t *testing.T) { + tests := []struct { + Value cty.Value + TargetTy cty.Type + Want cty.Value + Err string + }{ + { + cty.StringVal("a"), + cty.String, + cty.StringVal("a"), + ``, + }, + { + cty.UnknownVal(cty.String), + cty.String, + cty.UnknownVal(cty.String), + ``, + }, + { + cty.NullVal(cty.String), + cty.String, + cty.NullVal(cty.String), + ``, + }, + { + // This test case represents evaluating the expression tostring(null) + // from HCL, since null in HCL is cty.NullVal(cty.DynamicPseudoType). + // The result in that case should still be null, but a null specifically + // of type string. 
+ cty.NullVal(cty.DynamicPseudoType), + cty.String, + cty.NullVal(cty.String), + ``, + }, + { + cty.StringVal("a").Mark("boop"), + cty.String, + cty.StringVal("a").Mark("boop"), + ``, + }, + { + cty.NullVal(cty.String).Mark("boop"), + cty.String, + cty.NullVal(cty.String).Mark("boop"), + ``, + }, + { + cty.True, + cty.String, + cty.StringVal("true"), + ``, + }, + { + cty.StringVal("a"), + cty.Bool, + cty.DynamicVal, + `cannot convert "a" to bool; only the strings "true" or "false" are allowed`, + }, + { + cty.StringVal("a").Mark("boop"), + cty.Bool, + cty.DynamicVal, + `cannot convert "a" to bool; only the strings "true" or "false" are allowed`, + }, + { + cty.StringVal("a").Mark(marks.Sensitive), + cty.Bool, + cty.DynamicVal, + `cannot convert this sensitive string to bool`, + }, + { + cty.StringVal("a"), + cty.Number, + cty.DynamicVal, + `cannot convert "a" to number; given string must be a decimal representation of a number`, + }, + { + cty.StringVal("a").Mark("boop"), + cty.Number, + cty.DynamicVal, + `cannot convert "a" to number; given string must be a decimal representation of a number`, + }, + { + cty.StringVal("a").Mark(marks.Sensitive), + cty.Number, + cty.DynamicVal, + `cannot convert this sensitive string to number`, + }, + { + cty.NullVal(cty.String), + cty.Number, + cty.NullVal(cty.Number), + ``, + }, + { + cty.UnknownVal(cty.Bool), + cty.String, + cty.UnknownVal(cty.String), + ``, + }, + { + cty.UnknownVal(cty.String), + cty.Bool, + cty.UnknownVal(cty.Bool), // conversion is optimistic + ``, + }, + { + cty.TupleVal([]cty.Value{cty.StringVal("hello"), cty.True}), + cty.List(cty.String), + cty.ListVal([]cty.Value{cty.StringVal("hello"), cty.StringVal("true")}), + ``, + }, + { + cty.TupleVal([]cty.Value{cty.StringVal("hello"), cty.True}), + cty.Set(cty.String), + cty.SetVal([]cty.Value{cty.StringVal("hello"), cty.StringVal("true")}), + ``, + }, + { + cty.ObjectVal(map[string]cty.Value{"foo": cty.StringVal("hello"), "bar": cty.True}), + 
cty.Map(cty.String), + cty.MapVal(map[string]cty.Value{"foo": cty.StringVal("hello"), "bar": cty.StringVal("true")}), + ``, + }, + { + cty.ObjectVal(map[string]cty.Value{"foo": cty.StringVal("hello"), "bar": cty.StringVal("world").Mark("boop")}), + cty.Map(cty.String), + cty.MapVal(map[string]cty.Value{"foo": cty.StringVal("hello"), "bar": cty.StringVal("world").Mark("boop")}), + ``, + }, + { + cty.ObjectVal(map[string]cty.Value{"foo": cty.StringVal("hello"), "bar": cty.StringVal("world")}).Mark("boop"), + cty.Map(cty.String), + cty.MapVal(map[string]cty.Value{"foo": cty.StringVal("hello"), "bar": cty.StringVal("world")}).Mark("boop"), + ``, + }, + { + cty.TupleVal([]cty.Value{cty.StringVal("hello"), cty.StringVal("world").Mark("boop")}), + cty.List(cty.String), + cty.ListVal([]cty.Value{cty.StringVal("hello"), cty.StringVal("world").Mark("boop")}), + ``, + }, + { + cty.TupleVal([]cty.Value{cty.StringVal("hello"), cty.StringVal("world")}).Mark("boop"), + cty.List(cty.String), + cty.ListVal([]cty.Value{cty.StringVal("hello"), cty.StringVal("world")}).Mark("boop"), + ``, + }, + { + cty.EmptyTupleVal, + cty.String, + cty.DynamicVal, + `cannot convert tuple to string`, + }, + { + cty.UnknownVal(cty.EmptyTuple), + cty.String, + cty.DynamicVal, + `cannot convert tuple to string`, + }, + { + cty.EmptyObjectVal, + cty.Object(map[string]cty.Type{"foo": cty.String}), + cty.DynamicVal, + `incompatible object type for conversion: attribute "foo" is required`, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("to %s(%#v)", test.TargetTy.FriendlyNameForConstraint(), test.Value), func(t *testing.T) { + f := MakeToFunc(test.TargetTy) + got, err := f.Call([]cty.Value{test.Value}) + + if test.Err != "" { + if err == nil { + t.Fatal("succeeded; want error") + } + if got, want := err.Error(), test.Err; got != want { + t.Fatalf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if 
!got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} diff --git a/internal/terraform/lang/funcs/crypto.go b/internal/terraform/lang/funcs/crypto.go new file mode 100644 index 00000000..7c4ba4ad --- /dev/null +++ b/internal/terraform/lang/funcs/crypto.go @@ -0,0 +1,334 @@ +package funcs + +import ( + "crypto/md5" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/asn1" + "encoding/base64" + "encoding/hex" + "fmt" + "hash" + "io" + "strings" + + uuidv5 "github.com/google/uuid" + uuid "github.com/hashicorp/go-uuid" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/gocty" + "golang.org/x/crypto/bcrypt" + "golang.org/x/crypto/ssh" +) + +var UUIDFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + result, err := uuid.GenerateUUID() + if err != nil { + return cty.UnknownVal(cty.String), err + } + return cty.StringVal(result), nil + }, +}) + +var UUIDV5Func = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "namespace", + Type: cty.String, + }, + { + Name: "name", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var namespace uuidv5.UUID + switch { + case args[0].AsString() == "dns": + namespace = uuidv5.NameSpaceDNS + case args[0].AsString() == "url": + namespace = uuidv5.NameSpaceURL + case args[0].AsString() == "oid": + namespace = uuidv5.NameSpaceOID + case args[0].AsString() == "x500": + namespace = uuidv5.NameSpaceX500 + default: + if namespace, err = uuidv5.Parse(args[0].AsString()); err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("uuidv5() doesn't support namespace %s (%v)", args[0].AsString(), err) + } + } + val := args[1].AsString() + 
return cty.StringVal(uuidv5.NewSHA1(namespace, []byte(val)).String()), nil + }, +}) + +// Base64Sha256Func constructs a function that computes the SHA256 hash of a given string +// and encodes it with Base64. +var Base64Sha256Func = makeStringHashFunction(sha256.New, base64.StdEncoding.EncodeToString) + +// MakeFileBase64Sha256Func constructs a function that is like Base64Sha256Func but reads the +// contents of a file rather than hashing a given literal string. +func MakeFileBase64Sha256Func(baseDir string) function.Function { + return makeFileHashFunction(baseDir, sha256.New, base64.StdEncoding.EncodeToString) +} + +// Base64Sha512Func constructs a function that computes the SHA256 hash of a given string +// and encodes it with Base64. +var Base64Sha512Func = makeStringHashFunction(sha512.New, base64.StdEncoding.EncodeToString) + +// MakeFileBase64Sha512Func constructs a function that is like Base64Sha512Func but reads the +// contents of a file rather than hashing a given literal string. +func MakeFileBase64Sha512Func(baseDir string) function.Function { + return makeFileHashFunction(baseDir, sha512.New, base64.StdEncoding.EncodeToString) +} + +// BcryptFunc constructs a function that computes a hash of the given string using the Blowfish cipher. 
+var BcryptFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + VarParam: &function.Parameter{ + Name: "cost", + Type: cty.Number, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + defaultCost := 10 + + if len(args) > 1 { + var val int + if err := gocty.FromCtyValue(args[1], &val); err != nil { + return cty.UnknownVal(cty.String), err + } + defaultCost = val + } + + if len(args) > 2 { + return cty.UnknownVal(cty.String), fmt.Errorf("bcrypt() takes no more than two arguments") + } + + input := args[0].AsString() + out, err := bcrypt.GenerateFromPassword([]byte(input), defaultCost) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("error occured generating password %s", err.Error()) + } + + return cty.StringVal(string(out)), nil + }, +}) + +// Md5Func constructs a function that computes the MD5 hash of a given string and encodes it with hexadecimal digits. +var Md5Func = makeStringHashFunction(md5.New, hex.EncodeToString) + +// MakeFileMd5Func constructs a function that is like Md5Func but reads the +// contents of a file rather than hashing a given literal string. +func MakeFileMd5Func(baseDir string) function.Function { + return makeFileHashFunction(baseDir, md5.New, hex.EncodeToString) +} + +// RsaDecryptFunc constructs a function that decrypts an RSA-encrypted ciphertext. 
+var RsaDecryptFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "ciphertext", + Type: cty.String, + }, + { + Name: "privatekey", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + s := args[0].AsString() + key := args[1].AsString() + + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return cty.UnknownVal(cty.String), function.NewArgErrorf(0, "failed to decode input %q: cipher text must be base64-encoded", s) + } + + rawKey, err := ssh.ParseRawPrivateKey([]byte(key)) + if err != nil { + var errStr string + switch e := err.(type) { + case asn1.SyntaxError: + errStr = strings.ReplaceAll(e.Error(), "asn1: syntax error", "invalid ASN1 data in the given private key") + case asn1.StructuralError: + errStr = strings.ReplaceAll(e.Error(), "asn1: struture error", "invalid ASN1 data in the given private key") + default: + errStr = fmt.Sprintf("invalid private key: %s", e) + } + return cty.UnknownVal(cty.String), function.NewArgErrorf(1, errStr) + } + privateKey, ok := rawKey.(*rsa.PrivateKey) + if !ok { + return cty.UnknownVal(cty.String), function.NewArgErrorf(1, "invalid private key type %t", rawKey) + } + + out, err := rsa.DecryptPKCS1v15(nil, privateKey, b) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("failed to decrypt: %s", err) + } + + return cty.StringVal(string(out)), nil + }, +}) + +// Sha1Func contructs a function that computes the SHA1 hash of a given string +// and encodes it with hexadecimal digits. +var Sha1Func = makeStringHashFunction(sha1.New, hex.EncodeToString) + +// MakeFileSha1Func constructs a function that is like Sha1Func but reads the +// contents of a file rather than hashing a given literal string. 
+func MakeFileSha1Func(baseDir string) function.Function { + return makeFileHashFunction(baseDir, sha1.New, hex.EncodeToString) +} + +// Sha256Func contructs a function that computes the SHA256 hash of a given string +// and encodes it with hexadecimal digits. +var Sha256Func = makeStringHashFunction(sha256.New, hex.EncodeToString) + +// MakeFileSha256Func constructs a function that is like Sha256Func but reads the +// contents of a file rather than hashing a given literal string. +func MakeFileSha256Func(baseDir string) function.Function { + return makeFileHashFunction(baseDir, sha256.New, hex.EncodeToString) +} + +// Sha512Func contructs a function that computes the SHA512 hash of a given string +// and encodes it with hexadecimal digits. +var Sha512Func = makeStringHashFunction(sha512.New, hex.EncodeToString) + +// MakeFileSha512Func constructs a function that is like Sha512Func but reads the +// contents of a file rather than hashing a given literal string. +func MakeFileSha512Func(baseDir string) function.Function { + return makeFileHashFunction(baseDir, sha512.New, hex.EncodeToString) +} + +func makeStringHashFunction(hf func() hash.Hash, enc func([]byte) string) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + s := args[0].AsString() + h := hf() + h.Write([]byte(s)) + rv := enc(h.Sum(nil)) + return cty.StringVal(rv), nil + }, + }) +} + +func makeFileHashFunction(baseDir string, hf func() hash.Hash, enc func([]byte) string) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + path := args[0].AsString() + f, err := openFile(baseDir, 
path) + if err != nil { + return cty.UnknownVal(cty.String), err + } + defer f.Close() + + h := hf() + _, err = io.Copy(h, f) + if err != nil { + return cty.UnknownVal(cty.String), err + } + rv := enc(h.Sum(nil)) + return cty.StringVal(rv), nil + }, + }) +} + +// UUID generates and returns a Type-4 UUID in the standard hexadecimal string +// format. +// +// This is not a pure function: it will generate a different result for each +// call. It must therefore be registered as an impure function in the function +// table in the "lang" package. +func UUID() (cty.Value, error) { + return UUIDFunc.Call(nil) +} + +// UUIDV5 generates and returns a Type-5 UUID in the standard hexadecimal string +// format. +func UUIDV5(namespace cty.Value, name cty.Value) (cty.Value, error) { + return UUIDV5Func.Call([]cty.Value{namespace, name}) +} + +// Base64Sha256 computes the SHA256 hash of a given string and encodes it with +// Base64. +// +// The given string is first encoded as UTF-8 and then the SHA256 algorithm is applied +// as defined in RFC 4634. The raw hash is then encoded with Base64 before returning. +// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. +func Base64Sha256(str cty.Value) (cty.Value, error) { + return Base64Sha256Func.Call([]cty.Value{str}) +} + +// Base64Sha512 computes the SHA512 hash of a given string and encodes it with +// Base64. +// +// The given string is first encoded as UTF-8 and then the SHA256 algorithm is applied +// as defined in RFC 4634. The raw hash is then encoded with Base64 before returning. +// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4 +func Base64Sha512(str cty.Value) (cty.Value, error) { + return Base64Sha512Func.Call([]cty.Value{str}) +} + +// Bcrypt computes a hash of the given string using the Blowfish cipher, +// returning a string in the Modular Crypt Format +// usually expected in the shadow password file on many Unix systems. 
+func Bcrypt(str cty.Value, cost ...cty.Value) (cty.Value, error) { + args := make([]cty.Value, len(cost)+1) + args[0] = str + copy(args[1:], cost) + return BcryptFunc.Call(args) +} + +// Md5 computes the MD5 hash of a given string and encodes it with hexadecimal digits. +func Md5(str cty.Value) (cty.Value, error) { + return Md5Func.Call([]cty.Value{str}) +} + +// RsaDecrypt decrypts an RSA-encrypted ciphertext, returning the corresponding +// cleartext. +func RsaDecrypt(ciphertext, privatekey cty.Value) (cty.Value, error) { + return RsaDecryptFunc.Call([]cty.Value{ciphertext, privatekey}) +} + +// Sha1 computes the SHA1 hash of a given string and encodes it with hexadecimal digits. +func Sha1(str cty.Value) (cty.Value, error) { + return Sha1Func.Call([]cty.Value{str}) +} + +// Sha256 computes the SHA256 hash of a given string and encodes it with hexadecimal digits. +func Sha256(str cty.Value) (cty.Value, error) { + return Sha256Func.Call([]cty.Value{str}) +} + +// Sha512 computes the SHA512 hash of a given string and encodes it with hexadecimal digits. 
+func Sha512(str cty.Value) (cty.Value, error) { + return Sha512Func.Call([]cty.Value{str}) +} diff --git a/internal/terraform/lang/funcs/crypto_test.go b/internal/terraform/lang/funcs/crypto_test.go new file mode 100644 index 00000000..27977774 --- /dev/null +++ b/internal/terraform/lang/funcs/crypto_test.go @@ -0,0 +1,798 @@ +package funcs + +import ( + "fmt" + "testing" + + "github.com/zclconf/go-cty/cty" + "golang.org/x/crypto/bcrypt" +) + +func TestUUID(t *testing.T) { + result, err := UUID() + if err != nil { + t.Fatal(err) + } + + resultStr := result.AsString() + if got, want := len(resultStr), 36; got != want { + t.Errorf("wrong result length %d; want %d", got, want) + } +} + +func TestUUIDV5(t *testing.T) { + tests := []struct { + Namespace cty.Value + Name cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("dns"), + cty.StringVal("tada"), + cty.StringVal("faa898db-9b9d-5b75-86a9-149e7bb8e3b8"), + false, + }, + { + cty.StringVal("url"), + cty.StringVal("tada"), + cty.StringVal("2c1ff6b4-211f-577e-94de-d978b0caa16e"), + false, + }, + { + cty.StringVal("oid"), + cty.StringVal("tada"), + cty.StringVal("61eeea26-5176-5288-87fc-232d6ed30d2f"), + false, + }, + { + cty.StringVal("x500"), + cty.StringVal("tada"), + cty.StringVal("7e12415e-f7c9-57c3-9e43-52dc9950d264"), + false, + }, + { + cty.StringVal("6ba7b810-9dad-11d1-80b4-00c04fd430c8"), + cty.StringVal("tada"), + cty.StringVal("faa898db-9b9d-5b75-86a9-149e7bb8e3b8"), + false, + }, + { + cty.StringVal("tada"), + cty.StringVal("tada"), + cty.UnknownVal(cty.String), + true, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("uuidv5(%#v, %#v)", test.Namespace, test.Name), func(t *testing.T) { + got, err := UUIDV5(test.Namespace, test.Name) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + 
} + }) + } +} + +func TestBase64Sha256(t *testing.T) { + tests := []struct { + String cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("test"), + cty.StringVal("n4bQgYhMfWWaL+qgxVrQFaO/TxsrC4Is0V1sFbDwCgg="), + false, + }, + // This would differ because we're base64-encoding hex represantiation, not raw bytes. + // base64encode(sha256("test")) = + // "OWY4NmQwODE4ODRjN2Q2NTlhMmZlYWEwYzU1YWQwMTVhM2JmNGYxYjJiMGI4MjJjZDE1ZDZjMTViMGYwMGEwOA==" + } + + for _, test := range tests { + t.Run(fmt.Sprintf("base64sha256(%#v)", test.String), func(t *testing.T) { + got, err := Base64Sha256(test.String) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestFileBase64Sha256(t *testing.T) { + tests := []struct { + Path cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("testdata/hello.txt"), + cty.StringVal("pZGm1Av0IEBKARczz7exkNYsZb8LzaMrV7J32a2fFG4="), + false, + }, + { + cty.StringVal("testdata/icon.png"), + cty.StringVal("47U1q9IZW093SmAzdC820Skpn8vHPvc8szud/Y3ezpo="), + false, + }, + { + cty.StringVal("testdata/missing"), + cty.NilVal, + true, // no file exists + }, + } + + fileSHA256 := MakeFileBase64Sha256Func(".") + + for _, test := range tests { + t.Run(fmt.Sprintf("filebase64sha256(%#v)", test.Path), func(t *testing.T) { + got, err := fileSHA256.Call([]cty.Value{test.Path}) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestBase64Sha512(t *testing.T) { + tests := []struct { + String cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("test"), + 
cty.StringVal("7iaw3Ur350mqGo7jwQrpkj9hiYB3Lkc/iBml1JQODbJ6wYX4oOHV+E+IvIh/1nsUNzLDBMxfqa2Ob1f1ACio/w=="), + false, + }, + // This would differ because we're base64-encoding hex represantiation, not raw bytes + // base64encode(sha512("test")) = + // "OZWUyNmIwZGQ0YWY3ZTc0OWFhMWE4ZWUzYzEwYWU5OTIzZjYxODk4MDc3MmU0NzNmODgxOWE1ZDQ5NDBlMGRiMjdhYzE4NWY4YTBlMWQ1Zjg0Zjg4YmM4ODdmZDY3YjE0MzczMmMzMDRjYzVmYTlhZDhlNmY1N2Y1MDAyOGE4ZmY=" + } + + for _, test := range tests { + t.Run(fmt.Sprintf("base64sha512(%#v)", test.String), func(t *testing.T) { + got, err := Base64Sha512(test.String) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestFileBase64Sha512(t *testing.T) { + tests := []struct { + Path cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("testdata/hello.txt"), + cty.StringVal("LHT9F+2v2A6ER7DUZ0HuJDt+t03SFJoKsbkkb7MDgvJ+hT2FhXGeDmfL2g2qj1FnEGRhXWRa4nrLFb+xRH9Fmw=="), + false, + }, + { + cty.StringVal("testdata/icon.png"), + cty.StringVal("wSInO/tKEOaLGCAY2h/7gtLWMpzyLJ0ijFh95JTpYrPzXQYgviAdL9ZgpD9EAte8On+drvhFvjIFsfQUwxbNPQ=="), + false, + }, + { + cty.StringVal("testdata/missing"), + cty.NilVal, + true, // no file exists + }, + } + + fileSHA512 := MakeFileBase64Sha512Func(".") + + for _, test := range tests { + t.Run(fmt.Sprintf("filebase64sha512(%#v)", test.Path), func(t *testing.T) { + got, err := fileSHA512.Call([]cty.Value{test.Path}) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestBcrypt(t *testing.T) { + // single variable test + p, err := Bcrypt(cty.StringVal("test")) + if err != nil { + 
t.Fatalf("err: %s", err) + } + + err = bcrypt.CompareHashAndPassword([]byte(p.AsString()), []byte("test")) + if err != nil { + t.Fatalf("Error comparing hash and password: %s", err) + } + + // testing with two parameters + p, err = Bcrypt(cty.StringVal("test"), cty.NumberIntVal(5)) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = bcrypt.CompareHashAndPassword([]byte(p.AsString()), []byte("test")) + if err != nil { + t.Fatalf("Error comparing hash and password: %s", err) + } + + // Negative test for more than two parameters + _, err = Bcrypt(cty.StringVal("test"), cty.NumberIntVal(10), cty.NumberIntVal(11)) + if err == nil { + t.Fatal("succeeded; want error") + } +} + +func TestMd5(t *testing.T) { + tests := []struct { + String cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("tada"), + cty.StringVal("ce47d07243bb6eaf5e1322c81baf9bbf"), + false, + }, + { // Confirm that we're not trimming any whitespaces + cty.StringVal(" tada "), + cty.StringVal("aadf191a583e53062de2d02c008141c4"), + false, + }, + { // We accept empty string too + cty.StringVal(""), + cty.StringVal("d41d8cd98f00b204e9800998ecf8427e"), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("md5(%#v)", test.String), func(t *testing.T) { + got, err := Md5(test.String) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestFileMD5(t *testing.T) { + tests := []struct { + Path cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("testdata/hello.txt"), + cty.StringVal("b10a8db164e0754105b7a99be72e3fe5"), + false, + }, + { + cty.StringVal("testdata/icon.png"), + cty.StringVal("d7e6c283185a1078c58213beadca98b0"), + false, + }, + { + cty.StringVal("testdata/missing"), + cty.NilVal, + true, // no file exists + }, + } + + fileMD5 := 
MakeFileMd5Func(".") + + for _, test := range tests { + t.Run(fmt.Sprintf("filemd5(%#v)", test.Path), func(t *testing.T) { + got, err := fileMD5.Call([]cty.Value{test.Path}) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestRsaDecrypt(t *testing.T) { + tests := []struct { + Ciphertext cty.Value + Privatekey cty.Value + Want cty.Value + Err string + }{ + // Base-64 encoded cipher decrypts correctly + { + cty.StringVal(CipherBase64), + cty.StringVal(PrivateKey), + cty.StringVal("message"), + "", + }, + // OpenSSH key format + { + cty.StringVal(CipherBase64), + cty.StringVal(OpenSSHPrivateKey), + cty.StringVal("message"), + "", + }, + // Wrong key + { + cty.StringVal(CipherBase64), + cty.StringVal(WrongPrivateKey), + cty.UnknownVal(cty.String), + "failed to decrypt: crypto/rsa: decryption error", + }, + // Bad key + { + cty.StringVal(CipherBase64), + cty.StringVal(BadPrivateKey), + cty.UnknownVal(cty.String), + "invalid ASN1 data in the given private key: data truncated", + }, + // Empty key + { + cty.StringVal(CipherBase64), + cty.StringVal(""), + cty.UnknownVal(cty.String), + "invalid private key: ssh: no key found", + }, + // Bad ciphertext + { + cty.StringVal("bad"), + cty.StringVal(PrivateKey), + cty.UnknownVal(cty.String), + `failed to decode input "bad": cipher text must be base64-encoded`, + }, + // Empty ciphertext + { + cty.StringVal(""), + cty.StringVal(PrivateKey), + cty.UnknownVal(cty.String), + "failed to decrypt: crypto/rsa: decryption error", + }, + } + for _, test := range tests { + t.Run(fmt.Sprintf("RsaDecrypt(%#v, %#v)", test.Ciphertext, test.Privatekey), func(t *testing.T) { + got, err := RsaDecrypt(test.Ciphertext, test.Privatekey) + + if test.Err != "" { + if err == nil { + t.Fatal("succeeded; want error") + } else if 
err.Error() != test.Err { + t.Fatalf("wrong error\ngot: %s\nwant: %s", err.Error(), test.Err) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestSha1(t *testing.T) { + tests := []struct { + String cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("test"), + cty.StringVal("a94a8fe5ccb19ba61c4c0873d391e987982fbbd3"), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("sha1(%#v)", test.String), func(t *testing.T) { + got, err := Sha1(test.String) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestFileSHA1(t *testing.T) { + tests := []struct { + Path cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("testdata/hello.txt"), + cty.StringVal("0a4d55a8d778e5022fab701977c5d840bbc486d0"), + false, + }, + { + cty.StringVal("testdata/icon.png"), + cty.StringVal("2821bcc8379e1bd6f4f31b1e6a1fbb204b4a8be8"), + false, + }, + { + cty.StringVal("testdata/missing"), + cty.NilVal, + true, // no file exists + }, + } + + fileSHA1 := MakeFileSha1Func(".") + + for _, test := range tests { + t.Run(fmt.Sprintf("filesha1(%#v)", test.Path), func(t *testing.T) { + got, err := fileSHA1.Call([]cty.Value{test.Path}) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestSha256(t *testing.T) { + tests := []struct { + String cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("test"), + 
cty.StringVal("9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08"), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("sha256(%#v)", test.String), func(t *testing.T) { + got, err := Sha256(test.String) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestFileSHA256(t *testing.T) { + tests := []struct { + Path cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("testdata/hello.txt"), + cty.StringVal("a591a6d40bf420404a011733cfb7b190d62c65bf0bcda32b57b277d9ad9f146e"), + false, + }, + { + cty.StringVal("testdata/icon.png"), + cty.StringVal("e3b535abd2195b4f774a6033742f36d129299fcbc73ef73cb33b9dfd8ddece9a"), + false, + }, + { + cty.StringVal("testdata/missing"), + cty.NilVal, + true, // no file exists + }, + } + + fileSHA256 := MakeFileSha256Func(".") + + for _, test := range tests { + t.Run(fmt.Sprintf("filesha256(%#v)", test.Path), func(t *testing.T) { + got, err := fileSHA256.Call([]cty.Value{test.Path}) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestSha512(t *testing.T) { + tests := []struct { + String cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("test"), + cty.StringVal("ee26b0dd4af7e749aa1a8ee3c10ae9923f618980772e473f8819a5d4940e0db27ac185f8a0e1d5f84f88bc887fd67b143732c304cc5fa9ad8e6f57f50028a8ff"), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("sha512(%#v)", test.String), func(t *testing.T) { + got, err := Sha512(test.String) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if 
err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestFileSHA512(t *testing.T) { + tests := []struct { + Path cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("testdata/hello.txt"), + cty.StringVal("2c74fd17edafd80e8447b0d46741ee243b7eb74dd2149a0ab1b9246fb30382f27e853d8585719e0e67cbda0daa8f51671064615d645ae27acb15bfb1447f459b"), + false, + }, + { + cty.StringVal("testdata/icon.png"), + cty.StringVal("c122273bfb4a10e68b182018da1ffb82d2d6329cf22c9d228c587de494e962b3f35d0620be201d2fd660a43f4402d7bc3a7f9daef845be3205b1f414c316cd3d"), + false, + }, + { + cty.StringVal("testdata/missing"), + cty.NilVal, + true, // no file exists + }, + } + + fileSHA512 := MakeFileSha512Func(".") + + for _, test := range tests { + t.Run(fmt.Sprintf("filesha512(%#v)", test.Path), func(t *testing.T) { + got, err := fileSHA512.Call([]cty.Value{test.Path}) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +const ( + CipherBase64 = "eczGaDhXDbOFRZGhjx2etVzWbRqWDlmq0bvNt284JHVbwCgObiuyX9uV0LSAMY707IEgMkExJqXmsB4OWKxvB7epRB9G/3+F+pcrQpODlDuL9oDUAsa65zEpYF0Wbn7Oh7nrMQncyUPpyr9WUlALl0gRWytOA23S+y5joa4M34KFpawFgoqTu/2EEH4Xl1zo+0fy73fEto+nfkUY+meuyGZ1nUx/+DljP7ZqxHBFSlLODmtuTMdswUbHbXbWneW51D7Jm7xB8nSdiA2JQNK5+Sg5x8aNfgvFTt/m2w2+qpsyFa5Wjeu6fZmXSl840CA07aXbk9vN4I81WmJyblD/ZA==" + PrivateKey = ` +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAgUElV5mwqkloIrM8ZNZ72gSCcnSJt7+/Usa5G+D15YQUAdf9 +c1zEekTfHgDP+04nw/uFNFaE5v1RbHaPxhZYVg5ZErNCa/hzn+x10xzcepeS3KPV +Xcxae4MR0BEegvqZqJzN9loXsNL/c3H/B+2Gle3hTxjlWFb3F5qLgR+4Mf4ruhER +1v6eHQa/nchi03MBpT4UeJ7MrL92hTJYLdpSyCqmr8yjxkKJDVC2uRrr+sTSxfh7 
+r6v24u/vp/QTmBIAlNPgadVAZw17iNNb7vjV7Gwl/5gHXonCUKURaV++dBNLrHIZ +pqcAM8wHRph8mD1EfL9hsz77pHewxolBATV+7QIDAQABAoIBAC1rK+kFW3vrAYm3 ++8/fQnQQw5nec4o6+crng6JVQXLeH32qXShNf8kLLG/Jj0vaYcTPPDZw9JCKkTMQ +0mKj9XR/5DLbBMsV6eNXXuvJJ3x4iKW5eD9WkLD4FKlNarBRyO7j8sfPTqXW7uat +NxWdFH7YsSRvNh/9pyQHLWA5OituidMrYbc3EUx8B1GPNyJ9W8Q8znNYLfwYOjU4 +Wv1SLE6qGQQH9Q0WzA2WUf8jklCYyMYTIywAjGb8kbAJlKhmj2t2Igjmqtwt1PYc +pGlqbtQBDUiWXt5S4YX/1maIQ/49yeNUajjpbJiH3DbhJbHwFTzP3pZ9P9GHOzlG +kYR+wSECgYEAw/Xida8kSv8n86V3qSY/I+fYQ5V+jDtXIE+JhRnS8xzbOzz3v0WS +Oo5H+o4nJx5eL3Ghb3Gcm0Jn46dHrxinHbm+3RjXv/X6tlbxIYjRSQfHOTSMCTvd +qcliF5vC6RCLXuc7R+IWR1Ky6eDEZGtrvt3DyeYABsp9fRUFR/6NluUCgYEAqNsw +1aSl7WJa27F0DoJdlU9LWerpXcazlJcIdOz/S9QDmSK3RDQTdqfTxRmrxiYI9LEs +mkOkvzlnnOBMpnZ3ZOU5qIRfprecRIi37KDAOHWGnlC0EWGgl46YLb7/jXiWf0AG +Y+DfJJNd9i6TbIDWu8254/erAS6bKMhW/3q7f2kCgYAZ7Id/BiKJAWRpqTRBXlvw +BhXoKvjI2HjYP21z/EyZ+PFPzur/lNaZhIUlMnUfibbwE9pFggQzzf8scM7c7Sf+ +mLoVSdoQ/Rujz7CqvQzi2nKSsM7t0curUIb3lJWee5/UeEaxZcmIufoNUrzohAWH +BJOIPDM4ssUTLRq7wYM9uQKBgHCBau5OP8gE6mjKuXsZXWUoahpFLKwwwmJUp2vQ +pOFPJ/6WZOlqkTVT6QPAcPUbTohKrF80hsZqZyDdSfT3peFx4ZLocBrS56m6NmHR +UYHMvJ8rQm76T1fryHVidz85g3zRmfBeWg8yqT5oFg4LYgfLsPm1gRjOhs8LfPvI +OLlRAoGBAIZ5Uv4Z3s8O7WKXXUe/lq6j7vfiVkR1NW/Z/WLKXZpnmvJ7FgxN4e56 +RXT7GwNQHIY8eDjDnsHxzrxd+raOxOZeKcMHj3XyjCX3NHfTscnsBPAGYpY/Wxzh +T8UYnFu6RzkixElTf2rseEav7rkdKkI3LAeIZy7B0HulKKsmqVQ7 +-----END RSA PRIVATE KEY----- +` + OpenSSHPrivateKey = ` +-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABFwAAAAdzc2gtcn +NhAAAAAwEAAQAAAQEAgUElV5mwqkloIrM8ZNZ72gSCcnSJt7+/Usa5G+D15YQUAdf9c1zE +ekTfHgDP+04nw/uFNFaE5v1RbHaPxhZYVg5ZErNCa/hzn+x10xzcepeS3KPVXcxae4MR0B +EegvqZqJzN9loXsNL/c3H/B+2Gle3hTxjlWFb3F5qLgR+4Mf4ruhER1v6eHQa/nchi03MB +pT4UeJ7MrL92hTJYLdpSyCqmr8yjxkKJDVC2uRrr+sTSxfh7r6v24u/vp/QTmBIAlNPgad +VAZw17iNNb7vjV7Gwl/5gHXonCUKURaV++dBNLrHIZpqcAM8wHRph8mD1EfL9hsz77pHew +xolBATV+7QAAA7jbhEFk24RBZAAAAAdzc2gtcnNhAAABAQCBQSVXmbCqSWgiszxk1nvaBI 
+JydIm3v79Sxrkb4PXlhBQB1/1zXMR6RN8eAM/7TifD+4U0VoTm/VFsdo/GFlhWDlkSs0Jr ++HOf7HXTHNx6l5Lco9VdzFp7gxHQER6C+pmonM32Whew0v9zcf8H7YaV7eFPGOVYVvcXmo +uBH7gx/iu6ERHW/p4dBr+dyGLTcwGlPhR4nsysv3aFMlgt2lLIKqavzKPGQokNULa5Guv6 +xNLF+Huvq/bi7++n9BOYEgCU0+Bp1UBnDXuI01vu+NXsbCX/mAdeicJQpRFpX750E0usch +mmpwAzzAdGmHyYPUR8v2GzPvukd7DGiUEBNX7tAAAAAwEAAQAAAQAtayvpBVt76wGJt/vP +30J0EMOZ3nOKOvnK54OiVUFy3h99ql0oTX/JCyxvyY9L2mHEzzw2cPSQipEzENJio/V0f+ +Qy2wTLFenjV17rySd8eIiluXg/VpCw+BSpTWqwUcju4/LHz06l1u7mrTcVnRR+2LEkbzYf +/ackBy1gOTorbonTK2G3NxFMfAdRjzcifVvEPM5zWC38GDo1OFr9UixOqhkEB/UNFswNll +H/I5JQmMjGEyMsAIxm/JGwCZSoZo9rdiII5qrcLdT2HKRpam7UAQ1Ill7eUuGF/9ZmiEP+ +PcnjVGo46WyYh9w24SWx8BU8z96WfT/Rhzs5RpGEfsEhAAAAgQCGeVL+Gd7PDu1il11Hv5 +auo+734lZEdTVv2f1iyl2aZ5ryexYMTeHuekV0+xsDUByGPHg4w57B8c68Xfq2jsTmXinD +B4918owl9zR307HJ7ATwBmKWP1sc4U/FGJxbukc5IsRJU39q7HhGr+65HSpCNywHiGcuwd +B7pSirJqlUOwAAAIEAw/Xida8kSv8n86V3qSY/I+fYQ5V+jDtXIE+JhRnS8xzbOzz3v0WS +Oo5H+o4nJx5eL3Ghb3Gcm0Jn46dHrxinHbm+3RjXv/X6tlbxIYjRSQfHOTSMCTvdqcliF5 +vC6RCLXuc7R+IWR1Ky6eDEZGtrvt3DyeYABsp9fRUFR/6NluUAAACBAKjbMNWkpe1iWtux +dA6CXZVPS1nq6V3Gs5SXCHTs/0vUA5kit0Q0E3an08UZq8YmCPSxLJpDpL85Z5zgTKZ2d2 +TlOaiEX6a3nESIt+ygwDh1hp5QtBFhoJeOmC2+/414ln9ABmPg3ySTXfYuk2yA1rvNueP3 +qwEumyjIVv96u39pAAAAAAEC +-----END OPENSSH PRIVATE KEY----- +` + WrongPrivateKey = ` +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAlrCgnEVgmNKCq7KPc+zUU5IrxPu1ClMNJS7RTsTPEkbwe5SB +p+6V6WtCbD/X/lDRRGbOENChh1Phulb7lViqgrdpHydgsrKoS5ah3DfSIxLFLE00 +9Yo4TCYwgw6+s59j16ZAFVinaQ9l6Kmrb2ll136hMrz8QKh+qw+onOLd38WFgm+W +ZtUqSXf2LANzfzzy4OWFNyFqKaCAolSkPdTS9Nz+svtScvp002DQp8OdP1AgPO+l +o5N3M38Fftapwg0pCtJ5Zq0NRWIXEonXiTEMA6zy3gEZVOmDxoIFUWnmrqlMJLFy +5S6LDrHSdqJhCxDK6WRZj43X9j8spktk3eGhMwIDAQABAoIBAAem8ID/BOi9x+Tw +LFi2rhGQWqimH4tmrEQ3HGnjlKBY+d1MrUjZ1MMFr1nP5CgF8pqGnfA8p/c3Sz8r +K5tp5T6+EZiDZ2WrrOApxg5ox0MAsQKO6SGO40z6o3wEQ6rbbTaGOrraxaWQIpyu +AQanU4Sd6ZGqByVBaS1GnklZO+shCHqw73b7g1cpLEmFzcYnKHYHlUUIsstMe8E1 
+BaCY0CH7JbWBjcbiTnBVwIRZuu+EjGiQuhTilYL2OWqoMVg1WU0L2IFpR8lkf/2W +SBx5J6xhwbBGASOpM+qidiN580GdPzGhWYSqKGroHEzBm6xPSmV1tadNA26WFG4p +pthLiAECgYEA5BsPRpNYJAQLu5B0N7mj9eEp0HABVEgL/MpwiImjaKdAwp78HM64 +IuPvJxs7r+xESiIz4JyjR8zrQjYOCKJsARYkmNlEuAz0SkHabCw1BdEBwUhjUGVB +efoERK6GxfAoNqmSDwsOvHFOtsmDIlbHmg7G2rUxNVpeou415BSB0B8CgYEAqR4J +YHKk2Ibr9rU+rBU33TcdTGw0aAkFNAVeqM9j0haWuFXmV3RArgoy09lH+2Ha6z/g +fTX2xSDAWV7QUlLOlBRIhurPAo2jO2yCrGHPZcWiugstrR2hTTInigaSnCmK3i7F +6sYmL3S7K01IcVNxSlWvGijtClT92Cl2WUCTfG0CgYAiEjyk4QtQTd5mxLvnOu5X +oqs5PBGmwiAwQRiv/EcRMbJFn7Oupd3xMDSflbzDmTnWDOfMy/jDl8MoH6TW+1PA +kcsjnYhbKWwvz0hN0giVdtOZSDO1ZXpzOrn6fEsbM7T9/TQY1SD9WrtUKCNTNL0Z +sM1ZC6lu+7GZCpW4HKwLJwKBgQCRT0yxQXBg1/UxwuO5ynV4rx2Oh76z0WRWIXMH +S0MyxdP1SWGkrS/SGtM3cg/GcHtA/V6vV0nUcWK0p6IJyjrTw2XZ/zGluPuTWJYi +9dvVT26Vunshrz7kbH7KuwEICy3V4IyQQHeY+QzFlR70uMS0IVFWAepCoWqHbIDT +CYhwNQKBgGPcLXmjpGtkZvggl0aZr9LsvCTckllSCFSI861kivL/rijdNoCHGxZv +dfDkLTLcz9Gk41rD9Gxn/3sqodnTAc3Z2PxFnzg1Q/u3+x6YAgBwI/g/jE2xutGW +H7CurtMwALQ/n/6LUKFmjRZjqbKX9SO2QSaC3grd6sY9Tu+bZjLe +-----END RSA PRIVATE KEY----- +` + BadPrivateKey = ` +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAgUElV5mwqkloIrM8ZNZ72gSCcnSJt7+/Usa5G+D15YQUAdf9 +c1zEekTfHgDP+04nw/uFNFaE5v1RbHaPxhZYVg5ZErNCa/hzn+x10xzcepeS3KPV +Xcxae4MR0BEegvqZqJzN9loXsNL/c3H/B+2Gle3hTxjlWFb3F5qLgR+4Mf4ruhER +1v6eHQa/nchi03MBpT4UeJ7MrL92hTJYLdpSyCqmr8yjxkKJDVC2uRrr+sTSxfh7 +r6v24u/vp/QTmBIAlNPgadVAZw17iNNb7vjV7Gwl/5gHXonCUKURaV++dBNLrHIZ +pqcAM8wHRph8mD1EfL9hsz77pHewxolBATV+7QIDAQABAoIBAC1rK+kFW3vrAYm3 ++8/fQnQQw5nec4o6+crng6JVQXLeH32qXShNf8kLLG/Jj0vaYcTPPDZw9JCKkTMQ +0mKj9XR/5DLbBMsV6eNXXuvJJ3x4iKW5eD9WkLD4FKlNarBRyO7j8sfPTqXW7uat +NxWdFH7YsSRvNh/9pyQHLWA5OituidMrYbc3EUx8B1GPNyJ9W8Q8znNYLfwYOjU4 +Wv1SLE6qGQQH9Q0WzA2WUf8jklCYyMYTIywAjGb8kbAJlKhmj2t2Igjmqtwt1PYc +pGlqbtQBDUiWXt5S4YX/1maIQ/49yeNUajjpbJiH3DbhJbHwFTzP3pZ9P9GHOzlG +kYR+wSECgYEAw/Xida8kSv8n86V3qSY/I+fYQ5V+jDtXIE+JhRnS8xzbOzz3v0WS +Oo5H+o4nJx5eL3Ghb3Gcm0Jn46dHrxinHbm+3RjXv/X6tlbxIYjRSQfHOTSMCTvd 
+qcliF5vC6RCLXuc7R+IWR1Ky6eDEZGtrvt3DyeYABsp9fRUFR/6NluUCgYEAqNsw +1aSl7WJa27F0DoJdlU9LWerpXcazlJcIdOz/S9QDmSK3RDQTdqfTxRmrxiYI9LEs +mkOkvzlnnOBMpnZ3ZOU5qIRfprecRIi37KDAOHWGnlC0EWGgl46YLb7/jXiWf0AG +BhXoKvjI2HjYP21z/EyZ+PFPzur/lNaZhIUlMnUfibbwE9pFggQzzf8scM7c7Sf+ +mLoVSdoQ/Rujz7CqvQzi2nKSsM7t0curUIb3lJWee5/UeEaxZcmIufoNUrzohAWH +BJOIPDM4ssUTLRq7wYM9uQKBgHCBau5OP8gE6mjKuXsZXWUoahpFLKwwwmJUp2vQ +pOFPJ/6WZOlqkTVT6QPAcPUbTohKrF80hsZqZyDdSfT3peFx4ZLocBrS56m6NmHR +UYHMvJ8rQm76T1fryHVidz85g3zRmfBeWg8yqT5oFg4LYgfLsPm1gRjOhs8LfPvI +OLlRAoGBAIZ5Uv4Z3s8O7WKXXUe/lq6j7vfiVkR1NW/Z/WLKXZpnmvJ7FgxN4e56 +RXT7GwNQHIY8eDjDnsHxzrxd+raOxOZeKcMHj3XyjCX3NHfTscnsBPAGYpY/Wxzh +T8UYnFu6RzkixElTf2rseEav7rkdKkI3LAeIZy7B0HulKKsmqVQ7 +-----END RSA PRIVATE KEY----- +` +) diff --git a/internal/terraform/lang/funcs/datetime.go b/internal/terraform/lang/funcs/datetime.go new file mode 100644 index 00000000..5dae1987 --- /dev/null +++ b/internal/terraform/lang/funcs/datetime.go @@ -0,0 +1,70 @@ +package funcs + +import ( + "time" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// TimestampFunc constructs a function that returns a string representation of the current date and time. +var TimestampFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.StringVal(time.Now().UTC().Format(time.RFC3339)), nil + }, +}) + +// TimeAddFunc constructs a function that adds a duration to a timestamp, returning a new timestamp. 
+var TimeAddFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "timestamp", + Type: cty.String, + }, + { + Name: "duration", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + ts, err := time.Parse(time.RFC3339, args[0].AsString()) + if err != nil { + return cty.UnknownVal(cty.String), err + } + duration, err := time.ParseDuration(args[1].AsString()) + if err != nil { + return cty.UnknownVal(cty.String), err + } + + return cty.StringVal(ts.Add(duration).Format(time.RFC3339)), nil + }, +}) + +// Timestamp returns a string representation of the current date and time. +// +// In the Terraform language, timestamps are conventionally represented as +// strings using RFC 3339 "Date and Time format" syntax, and so timestamp +// returns a string in this format. +func Timestamp() (cty.Value, error) { + return TimestampFunc.Call([]cty.Value{}) +} + +// TimeAdd adds a duration to a timestamp, returning a new timestamp. +// +// In the Terraform language, timestamps are conventionally represented as +// strings using RFC 3339 "Date and Time format" syntax. Timeadd requires +// the timestamp argument to be a string conforming to this syntax. +// +// `duration` is a string representation of a time difference, consisting of +// sequences of number and unit pairs, like `"1.5h"` or `1h30m`. The accepted +// units are `ns`, `us` (or `µs`), `"ms"`, `"s"`, `"m"`, and `"h"`. The first +// number may be negative to indicate a negative duration, like `"-2h5m"`. +// +// The result is a string, also in RFC 3339 format, representing the result +// of adding the given direction to the given timestamp. 
+func TimeAdd(timestamp cty.Value, duration cty.Value) (cty.Value, error) { + return TimeAddFunc.Call([]cty.Value{timestamp, duration}) +} diff --git a/internal/terraform/lang/funcs/datetime_test.go b/internal/terraform/lang/funcs/datetime_test.go new file mode 100644 index 00000000..6ba4b1ed --- /dev/null +++ b/internal/terraform/lang/funcs/datetime_test.go @@ -0,0 +1,85 @@ +package funcs + +import ( + "fmt" + "testing" + "time" + + "github.com/zclconf/go-cty/cty" +) + +func TestTimestamp(t *testing.T) { + currentTime := time.Now().UTC() + result, err := Timestamp() + if err != nil { + t.Fatalf("err: %s", err) + } + resultTime, err := time.Parse(time.RFC3339, result.AsString()) + if err != nil { + t.Fatalf("Error parsing timestamp: %s", err) + } + + if resultTime.Sub(currentTime).Seconds() > 10.0 { + t.Fatalf("Timestamp Diff too large. Expected: %s\nReceived: %s", currentTime.Format(time.RFC3339), result.AsString()) + } + +} + +func TestTimeadd(t *testing.T) { + tests := []struct { + Time cty.Value + Duration cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("2017-11-22T00:00:00Z"), + cty.StringVal("1s"), + cty.StringVal("2017-11-22T00:00:01Z"), + false, + }, + { + cty.StringVal("2017-11-22T00:00:00Z"), + cty.StringVal("10m1s"), + cty.StringVal("2017-11-22T00:10:01Z"), + false, + }, + { // also support subtraction + cty.StringVal("2017-11-22T00:00:00Z"), + cty.StringVal("-1h"), + cty.StringVal("2017-11-21T23:00:00Z"), + false, + }, + { // Invalid format timestamp + cty.StringVal("2017-11-22"), + cty.StringVal("-1h"), + cty.UnknownVal(cty.String), + true, + }, + { // Invalid format duration (day is not supported by ParseDuration) + cty.StringVal("2017-11-22T00:00:00Z"), + cty.StringVal("1d"), + cty.UnknownVal(cty.String), + true, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("TimeAdd(%#v, %#v)", test.Time, test.Duration), func(t *testing.T) { + got, err := TimeAdd(test.Time, test.Duration) + + if test.Err { + if err == nil { + 
t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} diff --git a/internal/terraform/lang/funcs/defaults.go b/internal/terraform/lang/funcs/defaults.go new file mode 100644 index 00000000..e0bf4b07 --- /dev/null +++ b/internal/terraform/lang/funcs/defaults.go @@ -0,0 +1,288 @@ +package funcs + +import ( + "fmt" + + "github.com/camptocamp/terraboard/internal/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" +) + +// DefaultsFunc is a helper function for substituting default values in +// place of null values in a given data structure. +// +// See the documentation for function Defaults for more information. +var DefaultsFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "input", + Type: cty.DynamicPseudoType, + AllowNull: true, + AllowMarked: true, + }, + { + Name: "defaults", + Type: cty.DynamicPseudoType, + AllowMarked: true, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + // The result type is guaranteed to be the same as the input type, + // since all we're doing is replacing null values with non-null + // values of the same type. + retType := args[0].Type() + defaultsType := args[1].Type() + + // This function is aimed at filling in object types or collections + // of object types where some of the attributes might be null, so + // it doesn't make sense to use a primitive type directly with it. + // (The "coalesce" function may be appropriate for such cases.) 
+ if retType.IsPrimitiveType() { + // This error message is a bit of a fib because we can actually + // apply defaults to tuples too, but we expect that to be so + // unusual as to not be worth mentioning here, because mentioning + // it would require using some less-well-known Terraform language + // terminology in the message (tuple types, structural types). + return cty.DynamicPseudoType, function.NewArgErrorf(1, "only object types and collections of object types can have defaults applied") + } + + defaultsPath := make(cty.Path, 0, 4) // some capacity so that most structures won't reallocate + if err := defaultsAssertSuitableFallback(retType, defaultsType, defaultsPath); err != nil { + errMsg := tfdiags.FormatError(err) // add attribute path prefix + return cty.DynamicPseudoType, function.NewArgErrorf(1, "%s", errMsg) + } + + return retType, nil + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + if args[0].Type().HasDynamicTypes() { + // If the types our input object aren't known yet for some reason + // then we'll defer all of our work here, because our + // interpretation of the defaults depends on the types in + // the input. + return cty.UnknownVal(retType), nil + } + + v := defaultsApply(args[0], args[1]) + return v, nil + }, +}) + +func defaultsApply(input, fallback cty.Value) cty.Value { + wantTy := input.Type() + + umInput, inputMarks := input.Unmark() + umFb, fallbackMarks := fallback.Unmark() + + // If neither are known, we very conservatively return an unknown value + // with the union of marks on both input and default. + if !(umInput.IsKnown() && umFb.IsKnown()) { + return cty.UnknownVal(wantTy).WithMarks(inputMarks).WithMarks(fallbackMarks) + } + + // For the rest of this function we're assuming that the given defaults + // will always be valid, because we expect to have caught any problems + // during the type checking phase. 
Any inconsistencies that reach here are + // therefore considered to be implementation bugs, and so will panic. + + // Our strategy depends on the kind of type we're working with. + switch { + case wantTy.IsPrimitiveType(): + // For leaf primitive values the rule is relatively simple: use the + // input if it's non-null, or fallback if input is null. + if !umInput.IsNull() { + return input + } + v, err := convert.Convert(umFb, wantTy) + if err != nil { + // Should not happen because we checked in defaultsAssertSuitableFallback + panic(err.Error()) + } + return v.WithMarks(fallbackMarks) + + case wantTy.IsObjectType(): + // For structural types, a null input value must be passed through. We + // do not apply default values for missing optional structural values, + // only their contents. + // + // We also pass through the input if the fallback value is null. This + // can happen if the given defaults do not include a value for this + // attribute. + if umInput.IsNull() || umFb.IsNull() { + return input + } + atys := wantTy.AttributeTypes() + ret := map[string]cty.Value{} + for attr, aty := range atys { + inputSub := umInput.GetAttr(attr) + fallbackSub := cty.NullVal(aty) + if umFb.Type().HasAttribute(attr) { + fallbackSub = umFb.GetAttr(attr) + } + ret[attr] = defaultsApply(inputSub.WithMarks(inputMarks), fallbackSub.WithMarks(fallbackMarks)) + } + return cty.ObjectVal(ret) + + case wantTy.IsTupleType(): + // For structural types, a null input value must be passed through. We + // do not apply default values for missing optional structural values, + // only their contents. + // + // We also pass through the input if the fallback value is null. This + // can happen if the given defaults do not include a value for this + // attribute. 
+ if umInput.IsNull() || umFb.IsNull() { + return input + } + + l := wantTy.Length() + ret := make([]cty.Value, l) + for i := 0; i < l; i++ { + inputSub := umInput.Index(cty.NumberIntVal(int64(i))) + fallbackSub := umFb.Index(cty.NumberIntVal(int64(i))) + ret[i] = defaultsApply(inputSub.WithMarks(inputMarks), fallbackSub.WithMarks(fallbackMarks)) + } + return cty.TupleVal(ret) + + case wantTy.IsCollectionType(): + // For collection types we apply a single fallback value to each + // element of the input collection, because in the situations this + // function is intended for we assume that the number of elements + // is the caller's decision, and so we'll just apply the same defaults + // to all of the elements. + ety := wantTy.ElementType() + switch { + case wantTy.IsMapType(): + newVals := map[string]cty.Value{} + + if !umInput.IsNull() { + for it := umInput.ElementIterator(); it.Next(); { + k, v := it.Element() + newVals[k.AsString()] = defaultsApply(v.WithMarks(inputMarks), fallback.WithMarks(fallbackMarks)) + } + } + + if len(newVals) == 0 { + return cty.MapValEmpty(ety) + } + return cty.MapVal(newVals) + case wantTy.IsListType(), wantTy.IsSetType(): + var newVals []cty.Value + + if !umInput.IsNull() { + for it := umInput.ElementIterator(); it.Next(); { + _, v := it.Element() + newV := defaultsApply(v.WithMarks(inputMarks), fallback.WithMarks(fallbackMarks)) + newVals = append(newVals, newV) + } + } + + if len(newVals) == 0 { + if wantTy.IsSetType() { + return cty.SetValEmpty(ety) + } + return cty.ListValEmpty(ety) + } + if wantTy.IsSetType() { + return cty.SetVal(newVals) + } + return cty.ListVal(newVals) + default: + // There are no other collection types, so this should not happen + panic(fmt.Sprintf("invalid collection type %#v", wantTy)) + } + default: + // We should've caught anything else in defaultsAssertSuitableFallback, + // so this should not happen. 
+ panic(fmt.Sprintf("invalid target type %#v", wantTy)) + } +} + +func defaultsAssertSuitableFallback(wantTy, fallbackTy cty.Type, fallbackPath cty.Path) error { + // If the type we want is a collection type then we need to keep peeling + // away collection type wrappers until we find the non-collection-type + // that's underneath, which is what the fallback will actually be applied + // to. + inCollection := false + for wantTy.IsCollectionType() { + wantTy = wantTy.ElementType() + inCollection = true + } + + switch { + case wantTy.IsPrimitiveType(): + // The fallback is valid if it's equal to or convertible to what we want. + if fallbackTy.Equals(wantTy) { + return nil + } + conversion := convert.GetConversion(fallbackTy, wantTy) + if conversion == nil { + msg := convert.MismatchMessage(fallbackTy, wantTy) + return fallbackPath.NewErrorf("invalid default value for %s: %s", wantTy.FriendlyName(), msg) + } + return nil + case wantTy.IsObjectType(): + if !fallbackTy.IsObjectType() { + if inCollection { + return fallbackPath.NewErrorf("the default value for a collection of an object type must itself be an object type, not %s", fallbackTy.FriendlyName()) + } + return fallbackPath.NewErrorf("the default value for an object type must itself be an object type, not %s", fallbackTy.FriendlyName()) + } + for attr, wantAty := range wantTy.AttributeTypes() { + if !fallbackTy.HasAttribute(attr) { + continue // it's always okay to not have a default value + } + fallbackSubpath := fallbackPath.GetAttr(attr) + fallbackSubTy := fallbackTy.AttributeType(attr) + err := defaultsAssertSuitableFallback(wantAty, fallbackSubTy, fallbackSubpath) + if err != nil { + return err + } + } + for attr := range fallbackTy.AttributeTypes() { + if !wantTy.HasAttribute(attr) { + fallbackSubpath := fallbackPath.GetAttr(attr) + return fallbackSubpath.NewErrorf("target type does not expect an attribute named %q", attr) + } + } + return nil + case wantTy.IsTupleType(): + if !fallbackTy.IsTupleType() { + 
if inCollection { + return fallbackPath.NewErrorf("the default value for a collection of a tuple type must itself be a tuple type, not %s", fallbackTy.FriendlyName()) + } + return fallbackPath.NewErrorf("the default value for a tuple type must itself be a tuple type, not %s", fallbackTy.FriendlyName()) + } + wantEtys := wantTy.TupleElementTypes() + fallbackEtys := fallbackTy.TupleElementTypes() + if got, want := len(wantEtys), len(fallbackEtys); got != want { + return fallbackPath.NewErrorf("the default value for a tuple type of length %d must also have length %d, not %d", want, want, got) + } + for i := 0; i < len(wantEtys); i++ { + fallbackSubpath := fallbackPath.IndexInt(i) + wantSubTy := wantEtys[i] + fallbackSubTy := fallbackEtys[i] + err := defaultsAssertSuitableFallback(wantSubTy, fallbackSubTy, fallbackSubpath) + if err != nil { + return err + } + } + return nil + default: + // No other types are supported right now. + return fallbackPath.NewErrorf("cannot apply defaults to %s", wantTy.FriendlyName()) + } +} + +// Defaults is a helper function for substituting default values in +// place of null values in a given data structure. +// +// This is primarily intended for use with a module input variable that +// has an object type constraint (or a collection thereof) that has optional +// attributes, so that the receiver of a value that omits those attributes +// can insert non-null default values in place of the null values caused by +// omitting the attributes. 
+func Defaults(input, defaults cty.Value) (cty.Value, error) { + return DefaultsFunc.Call([]cty.Value{input, defaults}) +} diff --git a/internal/terraform/lang/funcs/defaults_test.go b/internal/terraform/lang/funcs/defaults_test.go new file mode 100644 index 00000000..e4016326 --- /dev/null +++ b/internal/terraform/lang/funcs/defaults_test.go @@ -0,0 +1,648 @@ +package funcs + +import ( + "fmt" + "testing" + + "github.com/zclconf/go-cty/cty" +) + +func TestDefaults(t *testing.T) { + tests := []struct { + Input, Defaults cty.Value + Want cty.Value + WantErr string + }{ + { // When *either* input or default are unknown, an unknown is returned. + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.UnknownVal(cty.String), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hello"), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.UnknownVal(cty.String), + }), + }, + { + // When *either* input or default are unknown, an unknown is + // returned with marks from both input and defaults. 
+ Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.UnknownVal(cty.String), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hello").Mark("marked"), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.UnknownVal(cty.String).Mark("marked"), + }), + }, + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.String), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hello"), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hello"), + }), + }, + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hey"), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hello"), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hey"), + }), + }, + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.String), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.String), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.String), + }), + }, + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.String), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{}), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.String), + }), + }, + { + Input: cty.ObjectVal(map[string]cty.Value{}), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.String), + }), + WantErr: `.a: target type does not expect an attribute named "a"`, + }, + + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.NullVal(cty.String), + }), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hello"), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + }), + }), + }, + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.NullVal(cty.String), + cty.StringVal("hey"), + cty.NullVal(cty.String), + }), + }), + 
Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hello"), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("hey"), + cty.StringVal("hello"), + }), + }), + }, + { + // Using defaults with single set elements is a pretty + // odd thing to do, but this behavior is just here because + // it generalizes from how we handle collections. It's + // tested only to ensure it doesn't change accidentally + // in future. + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.SetVal([]cty.Value{ + cty.NullVal(cty.String), + cty.StringVal("hey"), + }), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hello"), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.SetVal([]cty.Value{ + cty.StringVal("hey"), + cty.StringVal("hello"), + }), + }), + }, + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.MapVal(map[string]cty.Value{ + "x": cty.NullVal(cty.String), + "y": cty.StringVal("hey"), + "z": cty.NullVal(cty.String), + }), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hello"), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.MapVal(map[string]cty.Value{ + "x": cty.StringVal("hello"), + "y": cty.StringVal("hey"), + "z": cty.StringVal("hello"), + }), + }), + }, + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hey"), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.NullVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hey"), + }), + }), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hello"), + }), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hey"), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": 
cty.StringVal("hello"), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hey"), + }), + }), + }), + }, + { + Input: cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hey"), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.NullVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hey"), + }), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hello"), + }), + Want: cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hey"), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hello"), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hey"), + }), + }), + }, + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("boop"), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.NullVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hey"), + }), + }), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hello"), + }), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("boop"), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hello"), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hey"), + }), + }), + }), + }, + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hello"), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.NullVal(cty.String), + }), + }), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hello"), + }), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.SetVal([]cty.Value{ + // After applying defaults, the one with a 
null value + // coalesced with the one with a non-null value, + // and so there's only one left. + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hello"), + }), + }), + }), + }, + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.MapVal(map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hey"), + }), + "beep": cty.ObjectVal(map[string]cty.Value{ + "b": cty.NullVal(cty.String), + }), + }), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hello"), + }), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.MapVal(map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hey"), + }), + "beep": cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hello"), + }), + }), + }), + }, + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hey"), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.NullVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hey"), + }), + }), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hello"), + }), + WantErr: `.a: the default value for a collection of an object type must itself be an object type, not string`, + }, + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.NullVal(cty.String), + cty.StringVal("hey"), + cty.NullVal(cty.String), + }), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + // The default value for a list must be a single value + // of the list's element type which provides defaults + // for each element separately, so the default for a + // list of string should be just a single string, not + // a list of string. 
+ "a": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + }), + }), + WantErr: `.a: invalid default value for string: string required`, + }, + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.TupleVal([]cty.Value{ + cty.NullVal(cty.String), + cty.StringVal("hey"), + cty.NullVal(cty.String), + }), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hello"), + }), + WantErr: `.a: the default value for a tuple type must itself be a tuple type, not string`, + }, + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.TupleVal([]cty.Value{ + cty.NullVal(cty.String), + cty.StringVal("hey"), + cty.NullVal(cty.String), + }), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.TupleVal([]cty.Value{ + cty.StringVal("hello 0"), + cty.StringVal("hello 1"), + cty.StringVal("hello 2"), + }), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.TupleVal([]cty.Value{ + cty.StringVal("hello 0"), + cty.StringVal("hey"), + cty.StringVal("hello 2"), + }), + }), + }, + { + // There's no reason to use this function for plain primitive + // types, because the "default" argument in a variable definition + // already has the equivalent behavior. This function is only + // to deal with the situation of a complex-typed variable where + // only parts of the data structure are optional. + Input: cty.NullVal(cty.String), + Defaults: cty.StringVal("hello"), + WantErr: `only object types and collections of object types can have defaults applied`, + }, + // When applying default values to structural types, null objects or + // tuples in the input should be passed through. 
+ { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.Object(map[string]cty.Type{ + "x": cty.String, + "y": cty.String, + })), + "b": cty.NullVal(cty.Tuple([]cty.Type{cty.String, cty.String})), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "x": cty.StringVal("hello"), + "y": cty.StringVal("there"), + }), + "b": cty.TupleVal([]cty.Value{ + cty.StringVal("how are"), + cty.StringVal("you?"), + }), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.Object(map[string]cty.Type{ + "x": cty.String, + "y": cty.String, + })), + "b": cty.NullVal(cty.Tuple([]cty.Type{cty.String, cty.String})), + }), + }, + // When applying default values to structural types, we permit null + // values in the defaults, and just pass through the input value. + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "p": cty.StringVal("xyz"), + "q": cty.StringVal("xyz"), + }), + }), + "b": cty.SetVal([]cty.Value{ + cty.TupleVal([]cty.Value{ + cty.NumberIntVal(0), + cty.NumberIntVal(2), + }), + cty.TupleVal([]cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(3), + }), + }), + "c": cty.NullVal(cty.String), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "c": cty.StringVal("tada"), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "p": cty.StringVal("xyz"), + "q": cty.StringVal("xyz"), + }), + }), + "b": cty.SetVal([]cty.Value{ + cty.TupleVal([]cty.Value{ + cty.NumberIntVal(0), + cty.NumberIntVal(2), + }), + cty.TupleVal([]cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(3), + }), + }), + "c": cty.StringVal("tada"), + }), + }, + // When applying default values to collection types, null collections in the + // input should result in empty collections in the output. 
+ { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.List(cty.String)), + "b": cty.NullVal(cty.Map(cty.String)), + "c": cty.NullVal(cty.Set(cty.String)), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hello"), + "b": cty.StringVal("hi"), + "c": cty.StringVal("greetings"), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListValEmpty(cty.String), + "b": cty.MapValEmpty(cty.String), + "c": cty.SetValEmpty(cty.String), + }), + }, + // When specifying fallbacks, we allow mismatched primitive attribute + // types so long as a safe conversion is possible. This means that we + // can accept number or boolean values for string attributes. + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.String), + "b": cty.NullVal(cty.String), + "c": cty.NullVal(cty.String), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.NumberIntVal(5), + "b": cty.True, + "c": cty.StringVal("greetings"), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("5"), + "b": cty.StringVal("true"), + "c": cty.StringVal("greetings"), + }), + }, + // Fallbacks with mismatched primitive attribute types which do not + // have safe conversions must not pass the suitable fallback check, + // even if unsafe conversion would be possible. 
+ { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.Bool), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("5"), + }), + WantErr: ".a: invalid default value for bool: bool required", + }, + // marks: we should preserve marks from both input value and defaults as leafily as possible + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.String), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hello").Mark("world"), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hello").Mark("world"), + }), + }, + { // "unused" marks don't carry over + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.String).Mark("a"), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hello"), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hello"), + }), + }, + { // Marks on tuples remain attached to individual elements + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.TupleVal([]cty.Value{ + cty.NullVal(cty.String), + cty.StringVal("hey").Mark("input"), + cty.NullVal(cty.String), + }), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.TupleVal([]cty.Value{ + cty.StringVal("hello 0").Mark("fallback"), + cty.StringVal("hello 1"), + cty.StringVal("hello 2"), + }), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.TupleVal([]cty.Value{ + cty.StringVal("hello 0").Mark("fallback"), + cty.StringVal("hey").Mark("input"), + cty.StringVal("hello 2"), + }), + }), + }, + { // Marks from list elements + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.NullVal(cty.String), + cty.StringVal("hey").Mark("input"), + cty.NullVal(cty.String), + }), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hello 0").Mark("fallback"), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.StringVal("hello 0").Mark("fallback"), 
+ cty.StringVal("hey").Mark("input"), + cty.StringVal("hello 0").Mark("fallback"), + }), + }), + }, + { + // Sets don't allow individually-marked elements, so the marks + // end up aggregating on the set itself anyway in this case. + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.SetVal([]cty.Value{ + cty.NullVal(cty.String), + cty.NullVal(cty.String), + cty.StringVal("hey").Mark("input"), + }), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hello 0").Mark("fallback"), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.SetVal([]cty.Value{ + cty.StringVal("hello 0"), + cty.StringVal("hey"), + cty.StringVal("hello 0"), + }).WithMarks(cty.NewValueMarks("fallback", "input")), + }), + }, + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.NullVal(cty.String), + }), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hello").Mark("beep"), + }).Mark("boop"), + // This is the least-intuitive case. The mark "boop" is attached to + // the default object, not it's elements, but both marks end up + // aggregated on the list element. 
+ Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.StringVal("hello").WithMarks(cty.NewValueMarks("beep", "boop")), + }), + }), + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("defaults(%#v, %#v)", test.Input, test.Defaults), func(t *testing.T) { + got, gotErr := Defaults(test.Input, test.Defaults) + + if test.WantErr != "" { + if gotErr == nil { + t.Fatalf("unexpected success\nwant error: %s", test.WantErr) + } + if got, want := gotErr.Error(), test.WantErr; got != want { + t.Fatalf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } else if gotErr != nil { + t.Fatalf("unexpected error\ngot: %s", gotErr.Error()) + } + + if !test.Want.RawEquals(got) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} diff --git a/internal/terraform/lang/funcs/encoding.go b/internal/terraform/lang/funcs/encoding.go new file mode 100644 index 00000000..2e67ebc8 --- /dev/null +++ b/internal/terraform/lang/funcs/encoding.go @@ -0,0 +1,255 @@ +package funcs + +import ( + "bytes" + "compress/gzip" + "encoding/base64" + "fmt" + "log" + "net/url" + "unicode/utf8" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "golang.org/x/text/encoding/ianaindex" +) + +// Base64DecodeFunc constructs a function that decodes a string containing a base64 sequence. 
+var Base64DecodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + AllowMarked: true, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + str, strMarks := args[0].Unmark() + s := str.AsString() + sDec, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("failed to decode base64 data %s", redactIfSensitive(s, strMarks)) + } + if !utf8.Valid([]byte(sDec)) { + log.Printf("[DEBUG] the result of decoding the provided string is not valid UTF-8: %s", redactIfSensitive(sDec, strMarks)) + return cty.UnknownVal(cty.String), fmt.Errorf("the result of decoding the provided string is not valid UTF-8") + } + return cty.StringVal(string(sDec)).WithMarks(strMarks), nil + }, +}) + +// Base64EncodeFunc constructs a function that encodes a string to a base64 sequence. +var Base64EncodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.StringVal(base64.StdEncoding.EncodeToString([]byte(args[0].AsString()))), nil + }, +}) + +// TextEncodeBase64Func constructs a function that encodes a string to a target encoding and then to a base64 sequence. 
+var TextEncodeBase64Func = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "string", + Type: cty.String, + }, + { + Name: "encoding", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + encoding, err := ianaindex.IANA.Encoding(args[1].AsString()) + if err != nil || encoding == nil { + return cty.UnknownVal(cty.String), function.NewArgErrorf(1, "%q is not a supported IANA encoding name or alias in this Terraform version", args[1].AsString()) + } + + encName, err := ianaindex.IANA.Name(encoding) + if err != nil { // would be weird, since we just read this encoding out + encName = args[1].AsString() + } + + encoder := encoding.NewEncoder() + encodedInput, err := encoder.Bytes([]byte(args[0].AsString())) + if err != nil { + // The string representations of "err" disclose implementation + // details of the underlying library, and the main error we might + // like to return a special message for is unexported as + // golang.org/x/text/encoding/internal.RepertoireError, so this + // is just a generic error message for now. + // + // We also don't include the string itself in the message because + // it can typically be very large, contain newline characters, + // etc. + return cty.UnknownVal(cty.String), function.NewArgErrorf(0, "the given string contains characters that cannot be represented in %s", encName) + } + + return cty.StringVal(base64.StdEncoding.EncodeToString(encodedInput)), nil + }, +}) + +// TextDecodeBase64Func constructs a function that decodes a base64 sequence to a target encoding. 
+var TextDecodeBase64Func = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "source", + Type: cty.String, + }, + { + Name: "encoding", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + encoding, err := ianaindex.IANA.Encoding(args[1].AsString()) + if err != nil || encoding == nil { + return cty.UnknownVal(cty.String), function.NewArgErrorf(1, "%q is not a supported IANA encoding name or alias in this Terraform version", args[1].AsString()) + } + + encName, err := ianaindex.IANA.Name(encoding) + if err != nil { // would be weird, since we just read this encoding out + encName = args[1].AsString() + } + + s := args[0].AsString() + sDec, err := base64.StdEncoding.DecodeString(s) + if err != nil { + switch err := err.(type) { + case base64.CorruptInputError: + return cty.UnknownVal(cty.String), function.NewArgErrorf(0, "the given value is has an invalid base64 symbol at offset %d", int(err)) + default: + return cty.UnknownVal(cty.String), function.NewArgErrorf(0, "invalid source string: %w", err) + } + + } + + decoder := encoding.NewDecoder() + decoded, err := decoder.Bytes(sDec) + if err != nil || bytes.ContainsRune(decoded, '�') { + return cty.UnknownVal(cty.String), function.NewArgErrorf(0, "the given string contains symbols that are not defined for %s", encName) + } + + return cty.StringVal(string(decoded)), nil + }, +}) + +// Base64GzipFunc constructs a function that compresses a string with gzip and then encodes the result in +// Base64 encoding. 
+var Base64GzipFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + s := args[0].AsString() + + var b bytes.Buffer + gz := gzip.NewWriter(&b) + if _, err := gz.Write([]byte(s)); err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("failed to write gzip raw data: %w", err) + } + if err := gz.Flush(); err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("failed to flush gzip writer: %w", err) + } + if err := gz.Close(); err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("failed to close gzip writer: %w", err) + } + return cty.StringVal(base64.StdEncoding.EncodeToString(b.Bytes())), nil + }, +}) + +// URLEncodeFunc constructs a function that applies URL encoding to a given string. +var URLEncodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.StringVal(url.QueryEscape(args[0].AsString())), nil + }, +}) + +// Base64Decode decodes a string containing a base64 sequence. +// +// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. +// +// Strings in the Terraform language are sequences of unicode characters rather +// than bytes, so this function will also interpret the resulting bytes as +// UTF-8. If the bytes after Base64 decoding are _not_ valid UTF-8, this function +// produces an error. +func Base64Decode(str cty.Value) (cty.Value, error) { + return Base64DecodeFunc.Call([]cty.Value{str}) +} + +// Base64Encode applies Base64 encoding to a string. +// +// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. 
+// +// Strings in the Terraform language are sequences of unicode characters rather +// than bytes, so this function will first encode the characters from the string +// as UTF-8, and then apply Base64 encoding to the result. +func Base64Encode(str cty.Value) (cty.Value, error) { + return Base64EncodeFunc.Call([]cty.Value{str}) +} + +// Base64Gzip compresses a string with gzip and then encodes the result in +// Base64 encoding. +// +// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. +// +// Strings in the Terraform language are sequences of unicode characters rather +// than bytes, so this function will first encode the characters from the string +// as UTF-8, then apply gzip compression, and then finally apply Base64 encoding. +func Base64Gzip(str cty.Value) (cty.Value, error) { + return Base64GzipFunc.Call([]cty.Value{str}) +} + +// URLEncode applies URL encoding to a given string. +// +// This function identifies characters in the given string that would have a +// special meaning when included as a query string argument in a URL and +// escapes them using RFC 3986 "percent encoding". +// +// If the given string contains non-ASCII characters, these are first encoded as +// UTF-8 and then percent encoding is applied separately to each UTF-8 byte. +func URLEncode(str cty.Value) (cty.Value, error) { + return URLEncodeFunc.Call([]cty.Value{str}) +} + +// TextEncodeBase64 applies Base64 encoding to a string that was encoded before with a target encoding. +// +// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. +// +// First step is to apply the target IANA encoding (e.g. UTF-16LE). +// Strings in the Terraform language are sequences of unicode characters rather +// than bytes, so this function will first encode the characters from the string +// as UTF-8, and then apply Base64 encoding to the result. 
+func TextEncodeBase64(str, enc cty.Value) (cty.Value, error) { + return TextEncodeBase64Func.Call([]cty.Value{str, enc}) +} + +// TextDecodeBase64 decodes a string containing a base64 sequence whereas a specific encoding of the string is expected. +// +// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. +// +// Strings in the Terraform language are sequences of unicode characters rather +// than bytes, so this function will also interpret the resulting bytes as +// the target encoding. +func TextDecodeBase64(str, enc cty.Value) (cty.Value, error) { + return TextDecodeBase64Func.Call([]cty.Value{str, enc}) +} diff --git a/internal/terraform/lang/funcs/encoding_test.go b/internal/terraform/lang/funcs/encoding_test.go new file mode 100644 index 00000000..e1878cc4 --- /dev/null +++ b/internal/terraform/lang/funcs/encoding_test.go @@ -0,0 +1,359 @@ +package funcs + +import ( + "fmt" + "testing" + + "github.com/camptocamp/terraboard/internal/terraform/lang/marks" + "github.com/zclconf/go-cty/cty" +) + +func TestBase64Decode(t *testing.T) { + tests := []struct { + String cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("YWJjMTIzIT8kKiYoKSctPUB+"), + cty.StringVal("abc123!?$*&()'-=@~"), + false, + }, + { + cty.StringVal("YWJjMTIzIT8kKiYoKSctPUB+").Mark(marks.Sensitive), + cty.StringVal("abc123!?$*&()'-=@~").Mark(marks.Sensitive), + false, + }, + { // Invalid base64 data decoding + cty.StringVal("this-is-an-invalid-base64-data"), + cty.UnknownVal(cty.String), + true, + }, + { // Invalid utf-8 + cty.StringVal("\xc3\x28"), + cty.UnknownVal(cty.String), + true, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("base64decode(%#v)", test.String), func(t *testing.T) { + got, err := Base64Decode(test.String) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: 
%#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestBase64Decode_error(t *testing.T) { + tests := map[string]struct { + String cty.Value + WantErr string + }{ + "invalid base64": { + cty.StringVal("dfg"), + `failed to decode base64 data "dfg"`, + }, + "sensitive invalid base64": { + cty.StringVal("dfg").Mark(marks.Sensitive), + `failed to decode base64 data (sensitive value)`, + }, + "invalid utf-8": { + cty.StringVal("whee"), + "the result of decoding the provided string is not valid UTF-8", + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + _, err := Base64Decode(test.String) + + if err == nil { + t.Fatal("succeeded; want error") + } + + if err.Error() != test.WantErr { + t.Errorf("wrong error result\ngot: %#v\nwant: %#v", err.Error(), test.WantErr) + } + }) + } +} + +func TestBase64Encode(t *testing.T) { + tests := []struct { + String cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("abc123!?$*&()'-=@~"), + cty.StringVal("YWJjMTIzIT8kKiYoKSctPUB+"), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("base64encode(%#v)", test.String), func(t *testing.T) { + got, err := Base64Encode(test.String) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestBase64Gzip(t *testing.T) { + tests := []struct { + String cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("test"), + cty.StringVal("H4sIAAAAAAAA/ypJLS4BAAAA//8BAAD//wx+f9gEAAAA"), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("base64gzip(%#v)", test.String), func(t *testing.T) { + got, err := Base64Gzip(test.String) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if 
!got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestURLEncode(t *testing.T) { + tests := []struct { + String cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("abc123-_"), + cty.StringVal("abc123-_"), + false, + }, + { + cty.StringVal("foo:bar@localhost?foo=bar&bar=baz"), + cty.StringVal("foo%3Abar%40localhost%3Ffoo%3Dbar%26bar%3Dbaz"), + false, + }, + { + cty.StringVal("mailto:email?subject=this+is+my+subject"), + cty.StringVal("mailto%3Aemail%3Fsubject%3Dthis%2Bis%2Bmy%2Bsubject"), + false, + }, + { + cty.StringVal("foo/bar"), + cty.StringVal("foo%2Fbar"), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("urlencode(%#v)", test.String), func(t *testing.T) { + got, err := URLEncode(test.String) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestBase64TextEncode(t *testing.T) { + tests := []struct { + String cty.Value + Encoding cty.Value + Want cty.Value + Err string + }{ + { + cty.StringVal("abc123!?$*&()'-=@~"), + cty.StringVal("UTF-8"), + cty.StringVal("YWJjMTIzIT8kKiYoKSctPUB+"), + ``, + }, + { + cty.StringVal("abc123!?$*&()'-=@~"), + cty.StringVal("UTF-16LE"), + cty.StringVal("YQBiAGMAMQAyADMAIQA/ACQAKgAmACgAKQAnAC0APQBAAH4A"), + ``, + }, + { + cty.StringVal("abc123!?$*&()'-=@~"), + cty.StringVal("CP936"), + cty.StringVal("YWJjMTIzIT8kKiYoKSctPUB+"), + ``, + }, + { + cty.StringVal("abc123!?$*&()'-=@~"), + cty.StringVal("NOT-EXISTS"), + cty.UnknownVal(cty.String), + `"NOT-EXISTS" is not a supported IANA encoding name or alias in this Terraform version`, + }, + { + cty.StringVal("🤔"), + cty.StringVal("cp437"), + cty.UnknownVal(cty.String), + `the given string contains characters that cannot be represented in IBM437`, + }, + { + 
cty.UnknownVal(cty.String), + cty.StringVal("windows-1250"), + cty.UnknownVal(cty.String), + ``, + }, + { + cty.StringVal("hello world"), + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.String), + ``, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("textencodebase64(%#v, %#v)", test.String, test.Encoding), func(t *testing.T) { + got, err := TextEncodeBase64(test.String, test.Encoding) + + if test.Err != "" { + if err == nil { + t.Fatal("succeeded; want error") + } + if got, want := err.Error(), test.Err; got != want { + t.Fatalf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestBase64TextDecode(t *testing.T) { + tests := []struct { + String cty.Value + Encoding cty.Value + Want cty.Value + Err string + }{ + { + cty.StringVal("YWJjMTIzIT8kKiYoKSctPUB+"), + cty.StringVal("UTF-8"), + cty.StringVal("abc123!?$*&()'-=@~"), + ``, + }, + { + cty.StringVal("YQBiAGMAMQAyADMAIQA/ACQAKgAmACgAKQAnAC0APQBAAH4A"), + cty.StringVal("UTF-16LE"), + cty.StringVal("abc123!?$*&()'-=@~"), + ``, + }, + { + cty.StringVal("YWJjMTIzIT8kKiYoKSctPUB+"), + cty.StringVal("CP936"), + cty.StringVal("abc123!?$*&()'-=@~"), + ``, + }, + { + cty.StringVal("doesn't matter"), + cty.StringVal("NOT-EXISTS"), + cty.UnknownVal(cty.String), + `"NOT-EXISTS" is not a supported IANA encoding name or alias in this Terraform version`, + }, + { + cty.StringVal(""), + cty.StringVal("cp437"), + cty.UnknownVal(cty.String), + `the given value is has an invalid base64 symbol at offset 0`, + }, + { + cty.StringVal("gQ=="), // this is 0x81, which is not defined in windows-1250 + cty.StringVal("windows-1250"), + cty.StringVal("�"), + `the given string contains symbols that are not defined for windows-1250`, + }, + { + cty.UnknownVal(cty.String), + cty.StringVal("windows-1250"), + 
cty.UnknownVal(cty.String), + ``, + }, + { + cty.StringVal("YQBiAGMAMQAyADMAIQA/ACQAKgAmACgAKQAnAC0APQBAAH4A"), + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.String), + ``, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("textdecodebase64(%#v, %#v)", test.String, test.Encoding), func(t *testing.T) { + got, err := TextDecodeBase64(test.String, test.Encoding) + + if test.Err != "" { + if err == nil { + t.Fatal("succeeded; want error") + } + if got, want := err.Error(), test.Err; got != want { + t.Fatalf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} diff --git a/internal/terraform/lang/funcs/filesystem.go b/internal/terraform/lang/funcs/filesystem.go new file mode 100644 index 00000000..e5de7907 --- /dev/null +++ b/internal/terraform/lang/funcs/filesystem.go @@ -0,0 +1,500 @@ +package funcs + +import ( + "encoding/base64" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "unicode/utf8" + + "github.com/bmatcuk/doublestar" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + homedir "github.com/mitchellh/go-homedir" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// MakeFileFunc constructs a function that takes a file path and returns the +// contents of that file, either directly as a string (where valid UTF-8 is +// required) or as a string containing base64 bytes. 
+func MakeFileFunc(baseDir string, encBase64 bool) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + AllowMarked: true, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + pathArg, pathMarks := args[0].Unmark() + path := pathArg.AsString() + src, err := readFileBytes(baseDir, path, pathMarks) + if err != nil { + err = function.NewArgError(0, err) + return cty.UnknownVal(cty.String), err + } + + switch { + case encBase64: + enc := base64.StdEncoding.EncodeToString(src) + return cty.StringVal(enc).WithMarks(pathMarks), nil + default: + if !utf8.Valid(src) { + return cty.UnknownVal(cty.String), fmt.Errorf("contents of %s are not valid UTF-8; use the filebase64 function to obtain the Base64 encoded contents or the other file functions (e.g. filemd5, filesha256) to obtain file hashing results instead", redactIfSensitive(path, pathMarks)) + } + return cty.StringVal(string(src)).WithMarks(pathMarks), nil + } + }, + }) +} + +// MakeTemplateFileFunc constructs a function that takes a file path and +// an arbitrary object of named values and attempts to render the referenced +// file as a template using HCL template syntax. +// +// The template itself may recursively call other functions so a callback +// must be provided to get access to those functions. The template cannot, +// however, access any variables defined in the scope: it is restricted only to +// those variables provided in the second function argument, to ensure that all +// dependencies on other graph nodes can be seen before executing this function. +// +// As a special exception, a referenced template file may not recursively call +// the templatefile function, since that would risk the same file being +// included into itself indefinitely. 
+func MakeTemplateFileFunc(baseDir string, funcsCb func() map[string]function.Function) function.Function { + + params := []function.Parameter{ + { + Name: "path", + Type: cty.String, + AllowMarked: true, + }, + { + Name: "vars", + Type: cty.DynamicPseudoType, + }, + } + + loadTmpl := func(fn string, marks cty.ValueMarks) (hcl.Expression, error) { + // We re-use File here to ensure the same filename interpretation + // as it does, along with its other safety checks. + tmplVal, err := File(baseDir, cty.StringVal(fn).WithMarks(marks)) + if err != nil { + return nil, err + } + + expr, diags := hclsyntax.ParseTemplate([]byte(tmplVal.AsString()), fn, hcl.Pos{Line: 1, Column: 1}) + if diags.HasErrors() { + return nil, diags + } + + return expr, nil + } + + renderTmpl := func(expr hcl.Expression, varsVal cty.Value) (cty.Value, error) { + if varsTy := varsVal.Type(); !(varsTy.IsMapType() || varsTy.IsObjectType()) { + return cty.DynamicVal, function.NewArgErrorf(1, "invalid vars value: must be a map") // or an object, but we don't strongly distinguish these most of the time + } + + ctx := &hcl.EvalContext{ + Variables: varsVal.AsValueMap(), + } + + // We require all of the variables to be valid HCL identifiers, because + // otherwise there would be no way to refer to them in the template + // anyway. Rejecting this here gives better feedback to the user + // than a syntax error somewhere in the template itself. + for n := range ctx.Variables { + if !hclsyntax.ValidIdentifier(n) { + // This error message intentionally doesn't describe _all_ of + // the different permutations that are technically valid as an + // HCL identifier, but rather focuses on what we might + // consider to be an "idiomatic" variable name. 
+ return cty.DynamicVal, function.NewArgErrorf(1, "invalid template variable name %q: must start with a letter, followed by zero or more letters, digits, and underscores", n) + } + } + + // We'll pre-check references in the template here so we can give a + // more specialized error message than HCL would by default, so it's + // clearer that this problem is coming from a templatefile call. + for _, traversal := range expr.Variables() { + root := traversal.RootName() + if _, ok := ctx.Variables[root]; !ok { + return cty.DynamicVal, function.NewArgErrorf(1, "vars map does not contain key %q, referenced at %s", root, traversal[0].SourceRange()) + } + } + + givenFuncs := funcsCb() // this callback indirection is to avoid chicken/egg problems + funcs := make(map[string]function.Function, len(givenFuncs)) + for name, fn := range givenFuncs { + if name == "templatefile" { + // We stub this one out to prevent recursive calls. + funcs[name] = function.New(&function.Spec{ + Params: params, + Type: func(args []cty.Value) (cty.Type, error) { + return cty.NilType, fmt.Errorf("cannot recursively call templatefile from inside templatefile call") + }, + }) + continue + } + funcs[name] = fn + } + ctx.Functions = funcs + + val, diags := expr.Value(ctx) + if diags.HasErrors() { + return cty.DynamicVal, diags + } + return val, nil + } + + return function.New(&function.Spec{ + Params: params, + Type: func(args []cty.Value) (cty.Type, error) { + if !(args[0].IsKnown() && args[1].IsKnown()) { + return cty.DynamicPseudoType, nil + } + + // We'll render our template now to see what result type it produces. + // A template consisting only of a single interpolation an potentially + // return any type. + + pathArg, pathMarks := args[0].Unmark() + expr, err := loadTmpl(pathArg.AsString(), pathMarks) + if err != nil { + return cty.DynamicPseudoType, err + } + + // This is safe even if args[1] contains unknowns because the HCL + // template renderer itself knows how to short-circuit those. 
+ val, err := renderTmpl(expr, args[1]) + return val.Type(), err + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + pathArg, pathMarks := args[0].Unmark() + expr, err := loadTmpl(pathArg.AsString(), pathMarks) + if err != nil { + return cty.DynamicVal, err + } + result, err := renderTmpl(expr, args[1]) + return result.WithMarks(pathMarks), err + }, + }) + +} + +// MakeFileExistsFunc constructs a function that takes a path +// and determines whether a file exists at that path +func MakeFileExistsFunc(baseDir string) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + AllowMarked: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + pathArg, pathMarks := args[0].Unmark() + path := pathArg.AsString() + path, err := homedir.Expand(path) + if err != nil { + return cty.UnknownVal(cty.Bool), fmt.Errorf("failed to expand ~: %w", err) + } + + if !filepath.IsAbs(path) { + path = filepath.Join(baseDir, path) + } + + // Ensure that the path is canonical for the host OS + path = filepath.Clean(path) + + fi, err := os.Stat(path) + if err != nil { + if os.IsNotExist(err) { + return cty.False.WithMarks(pathMarks), nil + } + return cty.UnknownVal(cty.Bool), fmt.Errorf("failed to stat %s", redactIfSensitive(path, pathMarks)) + } + + if fi.Mode().IsRegular() { + return cty.True.WithMarks(pathMarks), nil + } + + // The Go stat API only provides convenient access to whether it's + // a directory or not, so we need to do some bit fiddling to + // recognize other irregular file types. 
+ filename := redactIfSensitive(path, pathMarks) + fileType := fi.Mode().Type() + switch { + case (fileType & os.ModeDir) != 0: + err = function.NewArgErrorf(1, "%s is a directory, not a file", filename) + case (fileType & os.ModeDevice) != 0: + err = function.NewArgErrorf(1, "%s is a device node, not a regular file", filename) + case (fileType & os.ModeNamedPipe) != 0: + err = function.NewArgErrorf(1, "%s is a named pipe, not a regular file", filename) + case (fileType & os.ModeSocket) != 0: + err = function.NewArgErrorf(1, "%s is a unix domain socket, not a regular file", filename) + default: + // If it's not a type we recognize then we'll just return a + // generic error message. This should be very rare. + err = function.NewArgErrorf(1, "%s is not a regular file", filename) + + // Note: os.ModeSymlink should be impossible because we used + // os.Stat above, not os.Lstat. + } + + return cty.False, err + }, + }) +} + +// MakeFileSetFunc constructs a function that takes a glob pattern +// and enumerates a file set from that pattern +func MakeFileSetFunc(baseDir string) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + AllowMarked: true, + }, + { + Name: "pattern", + Type: cty.String, + AllowMarked: true, + }, + }, + Type: function.StaticReturnType(cty.Set(cty.String)), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + pathArg, pathMarks := args[0].Unmark() + path := pathArg.AsString() + patternArg, patternMarks := args[1].Unmark() + pattern := patternArg.AsString() + + marks := []cty.ValueMarks{pathMarks, patternMarks} + + if !filepath.IsAbs(path) { + path = filepath.Join(baseDir, path) + } + + // Join the path to the glob pattern, while ensuring the full + // pattern is canonical for the host OS. The joined path is + // automatically cleaned during this operation. 
+ pattern = filepath.Join(path, pattern) + + matches, err := doublestar.Glob(pattern) + if err != nil { + return cty.UnknownVal(cty.Set(cty.String)), fmt.Errorf("failed to glob pattern %s: %w", redactIfSensitive(pattern, marks...), err) + } + + var matchVals []cty.Value + for _, match := range matches { + fi, err := os.Stat(match) + + if err != nil { + return cty.UnknownVal(cty.Set(cty.String)), fmt.Errorf("failed to stat %s: %w", redactIfSensitive(match, marks...), err) + } + + if !fi.Mode().IsRegular() { + continue + } + + // Remove the path and file separator from matches. + match, err = filepath.Rel(path, match) + + if err != nil { + return cty.UnknownVal(cty.Set(cty.String)), fmt.Errorf("failed to trim path of match %s: %w", redactIfSensitive(match, marks...), err) + } + + // Replace any remaining file separators with forward slash (/) + // separators for cross-system compatibility. + match = filepath.ToSlash(match) + + matchVals = append(matchVals, cty.StringVal(match)) + } + + if len(matchVals) == 0 { + return cty.SetValEmpty(cty.String).WithMarks(marks...), nil + } + + return cty.SetVal(matchVals).WithMarks(marks...), nil + }, + }) +} + +// BasenameFunc constructs a function that takes a string containing a filesystem path +// and removes all except the last portion from it. +var BasenameFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.StringVal(filepath.Base(args[0].AsString())), nil + }, +}) + +// DirnameFunc constructs a function that takes a string containing a filesystem path +// and removes the last portion from it. 
+var DirnameFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.StringVal(filepath.Dir(args[0].AsString())), nil + }, +}) + +// AbsPathFunc constructs a function that converts a filesystem path to an absolute path +var AbsPathFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + absPath, err := filepath.Abs(args[0].AsString()) + return cty.StringVal(filepath.ToSlash(absPath)), err + }, +}) + +// PathExpandFunc constructs a function that expands a leading ~ character to the current user's home directory. +var PathExpandFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + + homePath, err := homedir.Expand(args[0].AsString()) + return cty.StringVal(homePath), err + }, +}) + +func openFile(baseDir, path string) (*os.File, error) { + path, err := homedir.Expand(path) + if err != nil { + return nil, fmt.Errorf("failed to expand ~: %w", err) + } + + if !filepath.IsAbs(path) { + path = filepath.Join(baseDir, path) + } + + // Ensure that the path is canonical for the host OS + path = filepath.Clean(path) + + return os.Open(path) +} + +func readFileBytes(baseDir, path string, marks cty.ValueMarks) ([]byte, error) { + f, err := openFile(baseDir, path) + if err != nil { + if os.IsNotExist(err) { + // An extra Terraform-specific hint for this situation + return nil, fmt.Errorf("no file exists at %s; this function works only with files that are distributed as part of the configuration source code, so if this file will be 
created by a resource in this configuration you must instead obtain this result from an attribute of that resource", redactIfSensitive(path, marks)) + } + return nil, err + } + defer f.Close() + + src, err := ioutil.ReadAll(f) + if err != nil { + return nil, fmt.Errorf("failed to read file: %w", err) + } + + return src, nil +} + +// File reads the contents of the file at the given path. +// +// The file must contain valid UTF-8 bytes, or this function will return an error. +// +// The underlying function implementation works relative to a particular base +// directory, so this wrapper takes a base directory string and uses it to +// construct the underlying function before calling it. +func File(baseDir string, path cty.Value) (cty.Value, error) { + fn := MakeFileFunc(baseDir, false) + return fn.Call([]cty.Value{path}) +} + +// FileExists determines whether a file exists at the given path. +// +// The underlying function implementation works relative to a particular base +// directory, so this wrapper takes a base directory string and uses it to +// construct the underlying function before calling it. +func FileExists(baseDir string, path cty.Value) (cty.Value, error) { + fn := MakeFileExistsFunc(baseDir) + return fn.Call([]cty.Value{path}) +} + +// FileSet enumerates a set of files given a glob pattern +// +// The underlying function implementation works relative to a particular base +// directory, so this wrapper takes a base directory string and uses it to +// construct the underlying function before calling it. +func FileSet(baseDir string, path, pattern cty.Value) (cty.Value, error) { + fn := MakeFileSetFunc(baseDir) + return fn.Call([]cty.Value{path, pattern}) +} + +// FileBase64 reads the contents of the file at the given path. +// +// The bytes from the file are encoded as base64 before returning. 
+// +// The underlying function implementation works relative to a particular base +// directory, so this wrapper takes a base directory string and uses it to +// construct the underlying function before calling it. +func FileBase64(baseDir string, path cty.Value) (cty.Value, error) { + fn := MakeFileFunc(baseDir, true) + return fn.Call([]cty.Value{path}) +} + +// Basename takes a string containing a filesystem path and removes all except the last portion from it. +// +// The underlying function implementation works only with the path string and does not access the filesystem itself. +// It is therefore unable to take into account filesystem features such as symlinks. +// +// If the path is empty then the result is ".", representing the current working directory. +func Basename(path cty.Value) (cty.Value, error) { + return BasenameFunc.Call([]cty.Value{path}) +} + +// Dirname takes a string containing a filesystem path and removes the last portion from it. +// +// The underlying function implementation works only with the path string and does not access the filesystem itself. +// It is therefore unable to take into account filesystem features such as symlinks. +// +// If the path is empty then the result is ".", representing the current working directory. +func Dirname(path cty.Value) (cty.Value, error) { + return DirnameFunc.Call([]cty.Value{path}) +} + +// Pathexpand takes a string that might begin with a `~` segment, and if so it replaces that segment with +// the current user's home directory path. +// +// The underlying function implementation works only with the path string and does not access the filesystem itself. +// It is therefore unable to take into account filesystem features such as symlinks. +// +// If the leading segment in the path is not `~` then the given path is returned unmodified. 
+func Pathexpand(path cty.Value) (cty.Value, error) { + return PathExpandFunc.Call([]cty.Value{path}) +} diff --git a/internal/terraform/lang/funcs/filesystem_test.go b/internal/terraform/lang/funcs/filesystem_test.go new file mode 100644 index 00000000..204328d7 --- /dev/null +++ b/internal/terraform/lang/funcs/filesystem_test.go @@ -0,0 +1,695 @@ +package funcs + +import ( + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/camptocamp/terraboard/internal/terraform/lang/marks" + homedir "github.com/mitchellh/go-homedir" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/function/stdlib" +) + +func TestFile(t *testing.T) { + tests := []struct { + Path cty.Value + Want cty.Value + Err string + }{ + { + cty.StringVal("testdata/hello.txt"), + cty.StringVal("Hello World"), + ``, + }, + { + cty.StringVal("testdata/icon.png"), + cty.NilVal, + `contents of "testdata/icon.png" are not valid UTF-8; use the filebase64 function to obtain the Base64 encoded contents or the other file functions (e.g. filemd5, filesha256) to obtain file hashing results instead`, + }, + { + cty.StringVal("testdata/icon.png").Mark(marks.Sensitive), + cty.NilVal, + `contents of (sensitive value) are not valid UTF-8; use the filebase64 function to obtain the Base64 encoded contents or the other file functions (e.g. 
filemd5, filesha256) to obtain file hashing results instead`, + }, + { + cty.StringVal("testdata/missing"), + cty.NilVal, + `no file exists at "testdata/missing"; this function works only with files that are distributed as part of the configuration source code, so if this file will be created by a resource in this configuration you must instead obtain this result from an attribute of that resource`, + }, + { + cty.StringVal("testdata/missing").Mark(marks.Sensitive), + cty.NilVal, + `no file exists at (sensitive value); this function works only with files that are distributed as part of the configuration source code, so if this file will be created by a resource in this configuration you must instead obtain this result from an attribute of that resource`, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("File(\".\", %#v)", test.Path), func(t *testing.T) { + got, err := File(".", test.Path) + + if test.Err != "" { + if err == nil { + t.Fatal("succeeded; want error") + } + if got, want := err.Error(), test.Err; got != want { + t.Errorf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestTemplateFile(t *testing.T) { + tests := []struct { + Path cty.Value + Vars cty.Value + Want cty.Value + Err string + }{ + { + cty.StringVal("testdata/hello.txt"), + cty.EmptyObjectVal, + cty.StringVal("Hello World"), + ``, + }, + { + cty.StringVal("testdata/icon.png"), + cty.EmptyObjectVal, + cty.NilVal, + `contents of "testdata/icon.png" are not valid UTF-8; use the filebase64 function to obtain the Base64 encoded contents or the other file functions (e.g. 
filemd5, filesha256) to obtain file hashing results instead`, + }, + { + cty.StringVal("testdata/missing"), + cty.EmptyObjectVal, + cty.NilVal, + `no file exists at "testdata/missing"; this function works only with files that are distributed as part of the configuration source code, so if this file will be created by a resource in this configuration you must instead obtain this result from an attribute of that resource`, + }, + { + cty.StringVal("testdata/secrets.txt").Mark(marks.Sensitive), + cty.EmptyObjectVal, + cty.NilVal, + `no file exists at (sensitive value); this function works only with files that are distributed as part of the configuration source code, so if this file will be created by a resource in this configuration you must instead obtain this result from an attribute of that resource`, + }, + { + cty.StringVal("testdata/hello.tmpl"), + cty.MapVal(map[string]cty.Value{ + "name": cty.StringVal("Jodie"), + }), + cty.StringVal("Hello, Jodie!"), + ``, + }, + { + cty.StringVal("testdata/hello.tmpl"), + cty.MapVal(map[string]cty.Value{ + "name!": cty.StringVal("Jodie"), + }), + cty.NilVal, + `invalid template variable name "name!": must start with a letter, followed by zero or more letters, digits, and underscores`, + }, + { + cty.StringVal("testdata/hello.tmpl"), + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("Jimbo"), + }), + cty.StringVal("Hello, Jimbo!"), + ``, + }, + { + cty.StringVal("testdata/hello.tmpl"), + cty.EmptyObjectVal, + cty.NilVal, + `vars map does not contain key "name", referenced at testdata/hello.tmpl:1,10-14`, + }, + { + cty.StringVal("testdata/func.tmpl"), + cty.ObjectVal(map[string]cty.Value{ + "list": cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }), + }), + cty.StringVal("The items are a, b, c"), + ``, + }, + { + cty.StringVal("testdata/recursive.tmpl"), + cty.MapValEmpty(cty.String), + cty.NilVal, + `testdata/recursive.tmpl:1,3-16: Error in function call; Call to 
function "templatefile" failed: cannot recursively call templatefile from inside templatefile call.`, + }, + { + cty.StringVal("testdata/list.tmpl"), + cty.ObjectVal(map[string]cty.Value{ + "list": cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }), + }), + cty.StringVal("- a\n- b\n- c\n"), + ``, + }, + { + cty.StringVal("testdata/list.tmpl"), + cty.ObjectVal(map[string]cty.Value{ + "list": cty.True, + }), + cty.NilVal, + `testdata/list.tmpl:1,13-17: Iteration over non-iterable value; A value of type bool cannot be used as the collection in a 'for' expression.`, + }, + { + cty.StringVal("testdata/bare.tmpl"), + cty.ObjectVal(map[string]cty.Value{ + "val": cty.True, + }), + cty.True, // since this template contains only an interpolation, its true value shines through + ``, + }, + } + + templateFileFn := MakeTemplateFileFunc(".", func() map[string]function.Function { + return map[string]function.Function{ + "join": stdlib.JoinFunc, + "templatefile": MakeFileFunc(".", false), // just a placeholder, since templatefile itself overrides this + } + }) + + for _, test := range tests { + t.Run(fmt.Sprintf("TemplateFile(%#v, %#v)", test.Path, test.Vars), func(t *testing.T) { + got, err := templateFileFn.Call([]cty.Value{test.Path, test.Vars}) + + if argErr, ok := err.(function.ArgError); ok { + if argErr.Index < 0 || argErr.Index > 1 { + t.Errorf("ArgError index %d is out of range for templatefile (must be 0 or 1)", argErr.Index) + } + } + + if test.Err != "" { + if err == nil { + t.Fatal("succeeded; want error") + } + if got, want := err.Error(), test.Err; got != want { + t.Errorf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestFileExists(t *testing.T) { + tests := []struct { + Path cty.Value + Want cty.Value + Err string + }{ 
+ { + cty.StringVal("testdata/hello.txt"), + cty.BoolVal(true), + ``, + }, + { + cty.StringVal(""), + cty.BoolVal(false), + `"." is a directory, not a file`, + }, + { + cty.StringVal("testdata").Mark(marks.Sensitive), + cty.BoolVal(false), + `(sensitive value) is a directory, not a file`, + }, + { + cty.StringVal("testdata/missing"), + cty.BoolVal(false), + ``, + }, + { + cty.StringVal("testdata/unreadable/foobar"), + cty.BoolVal(false), + `failed to stat "testdata/unreadable/foobar"`, + }, + { + cty.StringVal("testdata/unreadable/foobar").Mark(marks.Sensitive), + cty.BoolVal(false), + `failed to stat (sensitive value)`, + }, + } + + // Ensure "unreadable" directory cannot be listed during the test run + fi, err := os.Lstat("testdata/unreadable") + if err != nil { + t.Fatal(err) + } + os.Chmod("testdata/unreadable", 0000) + defer func(mode os.FileMode) { + os.Chmod("testdata/unreadable", mode) + }(fi.Mode()) + + for _, test := range tests { + t.Run(fmt.Sprintf("FileExists(\".\", %#v)", test.Path), func(t *testing.T) { + got, err := FileExists(".", test.Path) + + if test.Err != "" { + if err == nil { + t.Fatal("succeeded; want error") + } + if got, want := err.Error(), test.Err; got != want { + t.Errorf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestFileSet(t *testing.T) { + tests := []struct { + Path cty.Value + Pattern cty.Value + Want cty.Value + Err string + }{ + { + cty.StringVal("."), + cty.StringVal("testdata*"), + cty.SetValEmpty(cty.String), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("testdata"), + cty.SetValEmpty(cty.String), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("{testdata,missing}"), + cty.SetValEmpty(cty.String), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("testdata/missing"), + cty.SetValEmpty(cty.String), + ``, + 
}, + { + cty.StringVal("."), + cty.StringVal("testdata/missing*"), + cty.SetValEmpty(cty.String), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("*/missing"), + cty.SetValEmpty(cty.String), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("**/missing"), + cty.SetValEmpty(cty.String), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("testdata/*.txt"), + cty.SetVal([]cty.Value{ + cty.StringVal("testdata/hello.txt"), + }), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("testdata/hello.txt"), + cty.SetVal([]cty.Value{ + cty.StringVal("testdata/hello.txt"), + }), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("testdata/hello.???"), + cty.SetVal([]cty.Value{ + cty.StringVal("testdata/hello.txt"), + }), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("testdata/hello*"), + cty.SetVal([]cty.Value{ + cty.StringVal("testdata/hello.tmpl"), + cty.StringVal("testdata/hello.txt"), + }), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("testdata/hello.{tmpl,txt}"), + cty.SetVal([]cty.Value{ + cty.StringVal("testdata/hello.tmpl"), + cty.StringVal("testdata/hello.txt"), + }), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("*/hello.txt"), + cty.SetVal([]cty.Value{ + cty.StringVal("testdata/hello.txt"), + }), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("*/*.txt"), + cty.SetVal([]cty.Value{ + cty.StringVal("testdata/hello.txt"), + }), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("*/hello*"), + cty.SetVal([]cty.Value{ + cty.StringVal("testdata/hello.tmpl"), + cty.StringVal("testdata/hello.txt"), + }), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("**/hello*"), + cty.SetVal([]cty.Value{ + cty.StringVal("testdata/hello.tmpl"), + cty.StringVal("testdata/hello.txt"), + }), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("**/hello.{tmpl,txt}"), + cty.SetVal([]cty.Value{ + cty.StringVal("testdata/hello.tmpl"), + cty.StringVal("testdata/hello.txt"), + }), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("["), + 
cty.SetValEmpty(cty.String), + `failed to glob pattern "[": syntax error in pattern`, + }, + { + cty.StringVal("."), + cty.StringVal("[").Mark(marks.Sensitive), + cty.SetValEmpty(cty.String), + `failed to glob pattern (sensitive value): syntax error in pattern`, + }, + { + cty.StringVal("."), + cty.StringVal("\\"), + cty.SetValEmpty(cty.String), + `failed to glob pattern "\\": syntax error in pattern`, + }, + { + cty.StringVal("testdata"), + cty.StringVal("missing"), + cty.SetValEmpty(cty.String), + ``, + }, + { + cty.StringVal("testdata"), + cty.StringVal("missing*"), + cty.SetValEmpty(cty.String), + ``, + }, + { + cty.StringVal("testdata"), + cty.StringVal("*.txt"), + cty.SetVal([]cty.Value{ + cty.StringVal("hello.txt"), + }), + ``, + }, + { + cty.StringVal("testdata"), + cty.StringVal("hello.txt"), + cty.SetVal([]cty.Value{ + cty.StringVal("hello.txt"), + }), + ``, + }, + { + cty.StringVal("testdata"), + cty.StringVal("hello.???"), + cty.SetVal([]cty.Value{ + cty.StringVal("hello.txt"), + }), + ``, + }, + { + cty.StringVal("testdata"), + cty.StringVal("hello*"), + cty.SetVal([]cty.Value{ + cty.StringVal("hello.tmpl"), + cty.StringVal("hello.txt"), + }), + ``, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("FileSet(\".\", %#v, %#v)", test.Path, test.Pattern), func(t *testing.T) { + got, err := FileSet(".", test.Path, test.Pattern) + + if test.Err != "" { + if err == nil { + t.Fatal("succeeded; want error") + } + if got, want := err.Error(), test.Err; got != want { + t.Errorf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestFileBase64(t *testing.T) { + tests := []struct { + Path cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("testdata/hello.txt"), + cty.StringVal("SGVsbG8gV29ybGQ="), + false, + }, + { + 
cty.StringVal("testdata/icon.png"), + cty.StringVal("iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAAq1BMVEX///9cTuVeUeRcTuZcTuZcT+VbSe1cTuVdT+MAAP9JSbZcT+VcTuZAQLFAQLJcTuVcTuZcUuBBQbA/P7JAQLJaTuRcT+RcTuVGQ7xAQLJVVf9cTuVcTuVGRMFeUeRbTeJcTuU/P7JeTeZbTOVcTeZAQLJBQbNAQLNaUORcTeZbT+VcTuRAQLNAQLRdTuRHR8xgUOdgUN9cTuVdTeRdT+VZTulcTuVAQLL///8+GmETAAAANnRSTlMApibw+osO6DcBB3fIX87+oRk3yehB0/Nj/gNs7nsTRv3dHmu//JYUMLVr3bssjxkgEK5CaxeK03nIAAAAAWJLR0QAiAUdSAAAAAlwSFlzAAADoQAAA6EBvJf9gwAAAAd0SU1FB+EEBRIQDxZNTKsAAACCSURBVBjTfc7JFsFQEATQQpCYxyBEzJ55rvf/f0ZHcyQLvelTd1GngEwWycs5+UISyKLraSi9geWKK9Gr1j7AeqOJVtt2XtD1Bchef2BjQDAcCTC0CsA4mihMtXw2XwgsV2sFw812F+4P3y2GdI6nn3FGSs//4HJNAXDzU4Dg/oj/E+bsEbhf5cMsAAAAJXRFWHRkYXRlOmNyZWF0ZQAyMDE3LTA0LTA1VDE4OjE2OjE1KzAyOjAws5bLVQAAACV0RVh0ZGF0ZTptb2RpZnkAMjAxNy0wNC0wNVQxODoxNjoxNSswMjowMMLLc+kAAAAZdEVYdFNvZnR3YXJlAHd3dy5pbmtzY2FwZS5vcmeb7jwaAAAAC3RFWHRUaXRsZQBHcm91cJYfIowAAABXelRYdFJhdyBwcm9maWxlIHR5cGUgaXB0YwAAeJzj8gwIcVYoKMpPy8xJ5VIAAyMLLmMLEyMTS5MUAxMgRIA0w2QDI7NUIMvY1MjEzMQcxAfLgEigSi4A6hcRdPJCNZUAAAAASUVORK5CYII="), + false, + }, + { + cty.StringVal("testdata/missing"), + cty.NilVal, + true, // no file exists + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("FileBase64(\".\", %#v)", test.Path), func(t *testing.T) { + got, err := FileBase64(".", test.Path) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestBasename(t *testing.T) { + tests := []struct { + Path cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("testdata/hello.txt"), + cty.StringVal("hello.txt"), + false, + }, + { + cty.StringVal("hello.txt"), + cty.StringVal("hello.txt"), + false, + }, + { + cty.StringVal(""), + cty.StringVal("."), + false, + }, + } + 
+ for _, test := range tests { + t.Run(fmt.Sprintf("Basename(%#v)", test.Path), func(t *testing.T) { + got, err := Basename(test.Path) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestDirname(t *testing.T) { + tests := []struct { + Path cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("testdata/hello.txt"), + cty.StringVal("testdata"), + false, + }, + { + cty.StringVal("testdata/foo/hello.txt"), + cty.StringVal("testdata/foo"), + false, + }, + { + cty.StringVal("hello.txt"), + cty.StringVal("."), + false, + }, + { + cty.StringVal(""), + cty.StringVal("."), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("Dirname(%#v)", test.Path), func(t *testing.T) { + got, err := Dirname(test.Path) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestPathExpand(t *testing.T) { + homePath, err := homedir.Dir() + if err != nil { + t.Fatalf("Error getting home directory: %v", err) + } + + tests := []struct { + Path cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("~/test-file"), + cty.StringVal(filepath.Join(homePath, "test-file")), + false, + }, + { + cty.StringVal("~/another/test/file"), + cty.StringVal(filepath.Join(homePath, "another/test/file")), + false, + }, + { + cty.StringVal("/root/file"), + cty.StringVal("/root/file"), + false, + }, + { + cty.StringVal("/"), + cty.StringVal("/"), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("Dirname(%#v)", test.Path), func(t *testing.T) { + got, err := Pathexpand(test.Path) + + if test.Err { + if err == nil { + 
t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} diff --git a/internal/terraform/lang/funcs/number.go b/internal/terraform/lang/funcs/number.go new file mode 100644 index 00000000..d9587061 --- /dev/null +++ b/internal/terraform/lang/funcs/number.go @@ -0,0 +1,173 @@ +package funcs + +import ( + "math" + "math/big" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/gocty" +) + +// LogFunc contructs a function that returns the logarithm of a given number in a given base. +var LogFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "num", + Type: cty.Number, + }, + { + Name: "base", + Type: cty.Number, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var num float64 + if err := gocty.FromCtyValue(args[0], &num); err != nil { + return cty.UnknownVal(cty.String), err + } + + var base float64 + if err := gocty.FromCtyValue(args[1], &base); err != nil { + return cty.UnknownVal(cty.String), err + } + + return cty.NumberFloatVal(math.Log(num) / math.Log(base)), nil + }, +}) + +// PowFunc contructs a function that returns the logarithm of a given number in a given base. 
+var PowFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "num", + Type: cty.Number, + }, + { + Name: "power", + Type: cty.Number, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var num float64 + if err := gocty.FromCtyValue(args[0], &num); err != nil { + return cty.UnknownVal(cty.String), err + } + + var power float64 + if err := gocty.FromCtyValue(args[1], &power); err != nil { + return cty.UnknownVal(cty.String), err + } + + return cty.NumberFloatVal(math.Pow(num, power)), nil + }, +}) + +// SignumFunc constructs a function that returns the sign of the given number +// as -1, 0, or +1. +var SignumFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "num", + Type: cty.Number, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var num int + if err := gocty.FromCtyValue(args[0], &num); err != nil { + return cty.UnknownVal(cty.String), err + } + switch { + case num < 0: + return cty.NumberIntVal(-1), nil + case num > 0: + return cty.NumberIntVal(+1), nil + default: + return cty.NumberIntVal(0), nil + } + }, +}) + +// ParseIntFunc constructs a function that parses a string argument and returns an integer of the specified base. 
+var ParseIntFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "number", + Type: cty.DynamicPseudoType, + AllowMarked: true, + }, + { + Name: "base", + Type: cty.Number, + AllowMarked: true, + }, + }, + + Type: func(args []cty.Value) (cty.Type, error) { + if !args[0].Type().Equals(cty.String) { + return cty.Number, function.NewArgErrorf(0, "first argument must be a string, not %s", args[0].Type().FriendlyName()) + } + return cty.Number, nil + }, + + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + var numstr string + var base int + var err error + + numArg, numMarks := args[0].Unmark() + if err = gocty.FromCtyValue(numArg, &numstr); err != nil { + return cty.UnknownVal(cty.String), function.NewArgError(0, err) + } + + baseArg, baseMarks := args[1].Unmark() + if err = gocty.FromCtyValue(baseArg, &base); err != nil { + return cty.UnknownVal(cty.Number), function.NewArgError(1, err) + } + + if base < 2 || base > 62 { + return cty.UnknownVal(cty.Number), function.NewArgErrorf( + 1, + "base must be a whole number between 2 and 62 inclusive", + ) + } + + num, ok := (&big.Int{}).SetString(numstr, base) + if !ok { + return cty.UnknownVal(cty.Number), function.NewArgErrorf( + 0, + "cannot parse %s as a base %s integer", + redactIfSensitive(numstr, numMarks), + redactIfSensitive(base, baseMarks), + ) + } + + parsedNum := cty.NumberVal((&big.Float{}).SetInt(num)).WithMarks(numMarks, baseMarks) + + return parsedNum, nil + }, +}) + +// Log returns the logarithm of a given number in a given base. +func Log(num, base cty.Value) (cty.Value, error) { + return LogFunc.Call([]cty.Value{num, base}) +} + +// Pow returns the given number raised to the given power. +func Pow(num, power cty.Value) (cty.Value, error) { + return PowFunc.Call([]cty.Value{num, power}) +} + +// Signum determines the sign of a number, returning a number between -1 and +// 1 to represent the sign. 
+func Signum(num cty.Value) (cty.Value, error) { + return SignumFunc.Call([]cty.Value{num}) +} + +// ParseInt parses a string argument and returns an integer of the specified base. +func ParseInt(num cty.Value, base cty.Value) (cty.Value, error) { + return ParseIntFunc.Call([]cty.Value{num, base}) +} diff --git a/internal/terraform/lang/funcs/number_test.go b/internal/terraform/lang/funcs/number_test.go new file mode 100644 index 00000000..2fa8e6ff --- /dev/null +++ b/internal/terraform/lang/funcs/number_test.go @@ -0,0 +1,384 @@ +package funcs + +import ( + "fmt" + "testing" + + "github.com/camptocamp/terraboard/internal/terraform/lang/marks" + "github.com/zclconf/go-cty/cty" +) + +func TestLog(t *testing.T) { + tests := []struct { + Num cty.Value + Base cty.Value + Want cty.Value + Err bool + }{ + { + cty.NumberFloatVal(1), + cty.NumberFloatVal(10), + cty.NumberFloatVal(0), + false, + }, + { + cty.NumberFloatVal(10), + cty.NumberFloatVal(10), + cty.NumberFloatVal(1), + false, + }, + + { + cty.NumberFloatVal(0), + cty.NumberFloatVal(10), + cty.NegativeInfinity, + false, + }, + { + cty.NumberFloatVal(10), + cty.NumberFloatVal(0), + cty.NumberFloatVal(-0), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("log(%#v, %#v)", test.Num, test.Base), func(t *testing.T) { + got, err := Log(test.Num, test.Base) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestPow(t *testing.T) { + tests := []struct { + Num cty.Value + Power cty.Value + Want cty.Value + Err bool + }{ + { + cty.NumberFloatVal(1), + cty.NumberFloatVal(0), + cty.NumberFloatVal(1), + false, + }, + { + cty.NumberFloatVal(1), + cty.NumberFloatVal(1), + cty.NumberFloatVal(1), + false, + }, + + { + cty.NumberFloatVal(2), + cty.NumberFloatVal(0), + cty.NumberFloatVal(1), 
+ false, + }, + { + cty.NumberFloatVal(2), + cty.NumberFloatVal(1), + cty.NumberFloatVal(2), + false, + }, + { + cty.NumberFloatVal(3), + cty.NumberFloatVal(2), + cty.NumberFloatVal(9), + false, + }, + { + cty.NumberFloatVal(-3), + cty.NumberFloatVal(2), + cty.NumberFloatVal(9), + false, + }, + { + cty.NumberFloatVal(2), + cty.NumberFloatVal(-2), + cty.NumberFloatVal(0.25), + false, + }, + { + cty.NumberFloatVal(0), + cty.NumberFloatVal(2), + cty.NumberFloatVal(0), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("pow(%#v, %#v)", test.Num, test.Power), func(t *testing.T) { + got, err := Pow(test.Num, test.Power) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestSignum(t *testing.T) { + tests := []struct { + Num cty.Value + Want cty.Value + Err bool + }{ + { + cty.NumberFloatVal(0), + cty.NumberFloatVal(0), + false, + }, + { + cty.NumberFloatVal(12), + cty.NumberFloatVal(1), + false, + }, + { + cty.NumberFloatVal(-29), + cty.NumberFloatVal(-1), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("signum(%#v)", test.Num), func(t *testing.T) { + got, err := Signum(test.Num) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestParseInt(t *testing.T) { + tests := []struct { + Num cty.Value + Base cty.Value + Want cty.Value + Err string + }{ + { + cty.StringVal("128"), + cty.NumberIntVal(10), + cty.NumberIntVal(128), + ``, + }, + { + cty.StringVal("128").Mark(marks.Sensitive), + cty.NumberIntVal(10), + cty.NumberIntVal(128).Mark(marks.Sensitive), + ``, + }, + { + 
cty.StringVal("128"), + cty.NumberIntVal(10).Mark(marks.Sensitive), + cty.NumberIntVal(128).Mark(marks.Sensitive), + ``, + }, + { + cty.StringVal("128").Mark(marks.Sensitive), + cty.NumberIntVal(10).Mark(marks.Sensitive), + cty.NumberIntVal(128).Mark(marks.Sensitive), + ``, + }, + { + cty.StringVal("128").Mark("boop"), + cty.NumberIntVal(10).Mark(marks.Sensitive), + cty.NumberIntVal(128).WithMarks(cty.NewValueMarks("boop", marks.Sensitive)), + ``, + }, + { + cty.StringVal("-128"), + cty.NumberIntVal(10), + cty.NumberIntVal(-128), + ``, + }, + { + cty.StringVal("00128"), + cty.NumberIntVal(10), + cty.NumberIntVal(128), + ``, + }, + { + cty.StringVal("-00128"), + cty.NumberIntVal(10), + cty.NumberIntVal(-128), + ``, + }, + { + cty.StringVal("FF00"), + cty.NumberIntVal(16), + cty.NumberIntVal(65280), + ``, + }, + { + cty.StringVal("ff00"), + cty.NumberIntVal(16), + cty.NumberIntVal(65280), + ``, + }, + { + cty.StringVal("-FF00"), + cty.NumberIntVal(16), + cty.NumberIntVal(-65280), + ``, + }, + { + cty.StringVal("00FF00"), + cty.NumberIntVal(16), + cty.NumberIntVal(65280), + ``, + }, + { + cty.StringVal("-00FF00"), + cty.NumberIntVal(16), + cty.NumberIntVal(-65280), + ``, + }, + { + cty.StringVal("1011111011101111"), + cty.NumberIntVal(2), + cty.NumberIntVal(48879), + ``, + }, + { + cty.StringVal("aA"), + cty.NumberIntVal(62), + cty.NumberIntVal(656), + ``, + }, + { + cty.StringVal("Aa"), + cty.NumberIntVal(62), + cty.NumberIntVal(2242), + ``, + }, + { + cty.StringVal("999999999999999999999999999999999999999999999999999999999999"), + cty.NumberIntVal(10), + cty.MustParseNumberVal("999999999999999999999999999999999999999999999999999999999999"), + ``, + }, + { + cty.StringVal("FF"), + cty.NumberIntVal(10), + cty.UnknownVal(cty.Number), + `cannot parse "FF" as a base 10 integer`, + }, + { + cty.StringVal("FF").Mark(marks.Sensitive), + cty.NumberIntVal(10), + cty.UnknownVal(cty.Number), + `cannot parse (sensitive value) as a base 10 integer`, + }, + { + 
cty.StringVal("FF").Mark(marks.Sensitive), + cty.NumberIntVal(10).Mark(marks.Sensitive), + cty.UnknownVal(cty.Number), + `cannot parse (sensitive value) as a base (sensitive value) integer`, + }, + { + cty.StringVal("00FF"), + cty.NumberIntVal(10), + cty.UnknownVal(cty.Number), + `cannot parse "00FF" as a base 10 integer`, + }, + { + cty.StringVal("-00FF"), + cty.NumberIntVal(10), + cty.UnknownVal(cty.Number), + `cannot parse "-00FF" as a base 10 integer`, + }, + { + cty.NumberIntVal(2), + cty.NumberIntVal(10), + cty.UnknownVal(cty.Number), + `first argument must be a string, not number`, + }, + { + cty.StringVal("1"), + cty.NumberIntVal(63), + cty.UnknownVal(cty.Number), + `base must be a whole number between 2 and 62 inclusive`, + }, + { + cty.StringVal("1"), + cty.NumberIntVal(-1), + cty.UnknownVal(cty.Number), + `base must be a whole number between 2 and 62 inclusive`, + }, + { + cty.StringVal("1"), + cty.NumberIntVal(1), + cty.UnknownVal(cty.Number), + `base must be a whole number between 2 and 62 inclusive`, + }, + { + cty.StringVal("1"), + cty.NumberIntVal(0), + cty.UnknownVal(cty.Number), + `base must be a whole number between 2 and 62 inclusive`, + }, + { + cty.StringVal("1.2"), + cty.NumberIntVal(10), + cty.UnknownVal(cty.Number), + `cannot parse "1.2" as a base 10 integer`, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("parseint(%#v, %#v)", test.Num, test.Base), func(t *testing.T) { + got, err := ParseInt(test.Num, test.Base) + + if test.Err != "" { + if err == nil { + t.Fatal("succeeded; want error") + } + if got, want := err.Error(), test.Err; got != want { + t.Errorf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} diff --git a/internal/terraform/lang/funcs/redact.go b/internal/terraform/lang/funcs/redact.go new file mode 100644 index 
00000000..83d7dd43 --- /dev/null +++ b/internal/terraform/lang/funcs/redact.go @@ -0,0 +1,20 @@ +package funcs + +import ( + "fmt" + + "github.com/camptocamp/terraboard/internal/terraform/lang/marks" + "github.com/zclconf/go-cty/cty" +) + +func redactIfSensitive(value interface{}, markses ...cty.ValueMarks) string { + if marks.Has(cty.DynamicVal.WithMarks(markses...), marks.Sensitive) { + return "(sensitive value)" + } + switch v := value.(type) { + case string: + return fmt.Sprintf("%q", v) + default: + return fmt.Sprintf("%v", v) + } +} diff --git a/internal/terraform/lang/funcs/redact_test.go b/internal/terraform/lang/funcs/redact_test.go new file mode 100644 index 00000000..2de6cacf --- /dev/null +++ b/internal/terraform/lang/funcs/redact_test.go @@ -0,0 +1,51 @@ +package funcs + +import ( + "testing" + + "github.com/camptocamp/terraboard/internal/terraform/lang/marks" + "github.com/zclconf/go-cty/cty" +) + +func TestRedactIfSensitive(t *testing.T) { + testCases := map[string]struct { + value interface{} + marks []cty.ValueMarks + want string + }{ + "sensitive string": { + value: "foo", + marks: []cty.ValueMarks{cty.NewValueMarks(marks.Sensitive)}, + want: "(sensitive value)", + }, + "marked non-sensitive string": { + value: "foo", + marks: []cty.ValueMarks{cty.NewValueMarks("boop")}, + want: `"foo"`, + }, + "sensitive string with other marks": { + value: "foo", + marks: []cty.ValueMarks{cty.NewValueMarks("boop"), cty.NewValueMarks(marks.Sensitive)}, + want: "(sensitive value)", + }, + "sensitive number": { + value: 12345, + marks: []cty.ValueMarks{cty.NewValueMarks(marks.Sensitive)}, + want: "(sensitive value)", + }, + "non-sensitive number": { + value: 12345, + marks: []cty.ValueMarks{}, + want: "12345", + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got := redactIfSensitive(tc.value, tc.marks...) 
+ if got != tc.want { + t.Errorf("wrong result, got %v, want %v", got, tc.want) + } + }) + } +} diff --git a/internal/terraform/lang/funcs/sensitive.go b/internal/terraform/lang/funcs/sensitive.go new file mode 100644 index 00000000..ff7d87b0 --- /dev/null +++ b/internal/terraform/lang/funcs/sensitive.go @@ -0,0 +1,67 @@ +package funcs + +import ( + "github.com/camptocamp/terraboard/internal/terraform/lang/marks" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// SensitiveFunc returns a value identical to its argument except that +// Terraform will consider it to be sensitive. +var SensitiveFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "value", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowNull: true, + AllowMarked: true, + AllowDynamicType: true, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + // This function only affects the value's marks, so the result + // type is always the same as the argument type. + return args[0].Type(), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + val, _ := args[0].Unmark() + return val.Mark(marks.Sensitive), nil + }, +}) + +// NonsensitiveFunc takes a sensitive value and returns the same value without +// the sensitive marking, effectively exposing the value. +var NonsensitiveFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "value", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowNull: true, + AllowMarked: true, + AllowDynamicType: true, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + // This function only affects the value's marks, so the result + // type is always the same as the argument type. 
+ return args[0].Type(), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + if args[0].IsKnown() && !args[0].HasMark(marks.Sensitive) { + return cty.DynamicVal, function.NewArgErrorf(0, "the given value is not sensitive, so this call is redundant") + } + v, m := args[0].Unmark() + delete(m, marks.Sensitive) // remove the sensitive marking + return v.WithMarks(m), nil + }, +}) + +func Sensitive(v cty.Value) (cty.Value, error) { + return SensitiveFunc.Call([]cty.Value{v}) +} + +func Nonsensitive(v cty.Value) (cty.Value, error) { + return NonsensitiveFunc.Call([]cty.Value{v}) +} diff --git a/internal/terraform/lang/funcs/sensitive_test.go b/internal/terraform/lang/funcs/sensitive_test.go new file mode 100644 index 00000000..892832f3 --- /dev/null +++ b/internal/terraform/lang/funcs/sensitive_test.go @@ -0,0 +1,179 @@ +package funcs + +import ( + "fmt" + "testing" + + "github.com/camptocamp/terraboard/internal/terraform/lang/marks" + "github.com/zclconf/go-cty/cty" +) + +func TestSensitive(t *testing.T) { + tests := []struct { + Input cty.Value + WantErr string + }{ + { + cty.NumberIntVal(1), + ``, + }, + { + // Unknown values stay unknown while becoming sensitive + cty.UnknownVal(cty.String), + ``, + }, + { + // Null values stay unknown while becoming sensitive + cty.NullVal(cty.String), + ``, + }, + { + // DynamicVal can be marked as sensitive + cty.DynamicVal, + ``, + }, + { + // The marking is shallow only + cty.ListVal([]cty.Value{cty.NumberIntVal(1)}), + ``, + }, + { + // A value already marked is allowed and stays marked + cty.NumberIntVal(1).Mark(marks.Sensitive), + ``, + }, + { + // A value with some non-standard mark gets "fixed" to be marked + // with the standard "sensitive" mark. (This situation occurring + // would imply an inconsistency/bug elsewhere, so we're just + // being robust about it here.) 
+ cty.NumberIntVal(1).Mark("bloop"), + ``, + }, + { + // A value deep already marked is allowed and stays marked, + // _and_ we'll also mark the outer collection as sensitive. + cty.ListVal([]cty.Value{cty.NumberIntVal(1).Mark(marks.Sensitive)}), + ``, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("sensitive(%#v)", test.Input), func(t *testing.T) { + got, err := Sensitive(test.Input) + + if test.WantErr != "" { + if err == nil { + t.Fatal("succeeded; want error") + } + if got, want := err.Error(), test.WantErr; got != want { + t.Fatalf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.HasMark(marks.Sensitive) { + t.Errorf("result is not marked sensitive") + } + + gotRaw, gotMarks := got.Unmark() + if len(gotMarks) != 1 { + // We're only expecting to have the "sensitive" mark we checked + // above. Any others are an error, even if they happen to + // appear alongside "sensitive". (We might change this rule + // if someday we decide to use marks for some additional + // unrelated thing in Terraform, but currently we assume that + // _all_ marks imply sensitive, and so returning any other + // marks would be confusing.) + t.Errorf("extraneous marks %#v", gotMarks) + } + + // Disregarding shallow marks, the result should have the same + // effective value as the input. 
+ wantRaw, _ := test.Input.Unmark() + if !gotRaw.RawEquals(wantRaw) { + t.Errorf("wrong unmarked result\ngot: %#v\nwant: %#v", got, wantRaw) + } + }) + } +} + +func TestNonsensitive(t *testing.T) { + tests := []struct { + Input cty.Value + WantErr string + }{ + { + cty.NumberIntVal(1).Mark(marks.Sensitive), + ``, + }, + { + cty.DynamicVal.Mark(marks.Sensitive), + ``, + }, + { + cty.UnknownVal(cty.String).Mark(marks.Sensitive), + ``, + }, + { + cty.NullVal(cty.EmptyObject).Mark(marks.Sensitive), + ``, + }, + { + // The inner sensitive remains afterwards + cty.ListVal([]cty.Value{cty.NumberIntVal(1).Mark(marks.Sensitive)}).Mark(marks.Sensitive), + ``, + }, + + // Passing a value that is already non-sensitive is an error, + // because this function should always be used with specific + // intention, not just as a "make everything visible" hammer. + { + cty.NumberIntVal(1), + `the given value is not sensitive, so this call is redundant`, + }, + { + cty.NullVal(cty.String), + `the given value is not sensitive, so this call is redundant`, + }, + + // Unknown values may become sensitive once they are known, so we + // permit them to be marked nonsensitive. 
+ { + cty.DynamicVal, + ``, + }, + { + cty.UnknownVal(cty.String), + ``, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("nonsensitive(%#v)", test.Input), func(t *testing.T) { + got, err := Nonsensitive(test.Input) + + if test.WantErr != "" { + if err == nil { + t.Fatal("succeeded; want error") + } + if got, want := err.Error(), test.WantErr; got != want { + t.Fatalf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if got.HasMark(marks.Sensitive) { + t.Errorf("result is still marked sensitive") + } + wantRaw, _ := test.Input.Unmark() + if !got.RawEquals(wantRaw) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Input) + } + }) + } +} diff --git a/internal/terraform/lang/funcs/string.go b/internal/terraform/lang/funcs/string.go new file mode 100644 index 00000000..ab6da727 --- /dev/null +++ b/internal/terraform/lang/funcs/string.go @@ -0,0 +1,53 @@ +package funcs + +import ( + "regexp" + "strings" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// ReplaceFunc constructs a function that searches a given string for another +// given substring, and replaces each occurence with a given replacement string. +var ReplaceFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + { + Name: "substr", + Type: cty.String, + }, + { + Name: "replace", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + str := args[0].AsString() + substr := args[1].AsString() + replace := args[2].AsString() + + // We search/replace using a regexp if the string is surrounded + // in forward slashes. 
+ if len(substr) > 1 && substr[0] == '/' && substr[len(substr)-1] == '/' { + re, err := regexp.Compile(substr[1 : len(substr)-1]) + if err != nil { + return cty.UnknownVal(cty.String), err + } + + return cty.StringVal(re.ReplaceAllString(str, replace)), nil + } + + return cty.StringVal(strings.Replace(str, substr, replace, -1)), nil + }, +}) + +// Replace searches a given string for another given substring, +// and replaces all occurences with a given replacement string. +func Replace(str, substr, replace cty.Value) (cty.Value, error) { + return ReplaceFunc.Call([]cty.Value{str, substr, replace}) +} diff --git a/internal/terraform/lang/funcs/string_test.go b/internal/terraform/lang/funcs/string_test.go new file mode 100644 index 00000000..7b44a276 --- /dev/null +++ b/internal/terraform/lang/funcs/string_test.go @@ -0,0 +1,73 @@ +package funcs + +import ( + "fmt" + "testing" + + "github.com/zclconf/go-cty/cty" +) + +func TestReplace(t *testing.T) { + tests := []struct { + String cty.Value + Substr cty.Value + Replace cty.Value + Want cty.Value + Err bool + }{ + { // Regular search and replace + cty.StringVal("hello"), + cty.StringVal("hel"), + cty.StringVal("bel"), + cty.StringVal("bello"), + false, + }, + { // Search string doesn't match + cty.StringVal("hello"), + cty.StringVal("nope"), + cty.StringVal("bel"), + cty.StringVal("hello"), + false, + }, + { // Regular expression + cty.StringVal("hello"), + cty.StringVal("/l/"), + cty.StringVal("L"), + cty.StringVal("heLLo"), + false, + }, + { + cty.StringVal("helo"), + cty.StringVal("/(l)/"), + cty.StringVal("$1$1"), + cty.StringVal("hello"), + false, + }, + { // Bad regexp + cty.StringVal("hello"), + cty.StringVal("/(l/"), + cty.StringVal("$1$1"), + cty.UnknownVal(cty.String), + true, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("replace(%#v, %#v, %#v)", test.String, test.Substr, test.Replace), func(t *testing.T) { + got, err := Replace(test.String, test.Substr, test.Replace) + + if test.Err { + if 
err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} diff --git a/internal/terraform/lang/funcs/testdata/bare.tmpl b/internal/terraform/lang/funcs/testdata/bare.tmpl new file mode 100644 index 00000000..da7cbab0 --- /dev/null +++ b/internal/terraform/lang/funcs/testdata/bare.tmpl @@ -0,0 +1 @@ +${val} \ No newline at end of file diff --git a/internal/terraform/lang/funcs/testdata/func.tmpl b/internal/terraform/lang/funcs/testdata/func.tmpl new file mode 100644 index 00000000..33a24000 --- /dev/null +++ b/internal/terraform/lang/funcs/testdata/func.tmpl @@ -0,0 +1 @@ +The items are ${join(", ", list)} \ No newline at end of file diff --git a/internal/terraform/lang/funcs/testdata/hello.tmpl b/internal/terraform/lang/funcs/testdata/hello.tmpl new file mode 100644 index 00000000..f112ef89 --- /dev/null +++ b/internal/terraform/lang/funcs/testdata/hello.tmpl @@ -0,0 +1 @@ +Hello, ${name}! 
\ No newline at end of file diff --git a/internal/terraform/lang/funcs/testdata/hello.txt b/internal/terraform/lang/funcs/testdata/hello.txt new file mode 100644 index 00000000..5e1c309d --- /dev/null +++ b/internal/terraform/lang/funcs/testdata/hello.txt @@ -0,0 +1 @@ +Hello World \ No newline at end of file diff --git a/internal/terraform/lang/funcs/testdata/icon.png b/internal/terraform/lang/funcs/testdata/icon.png new file mode 100644 index 00000000..a474f146 Binary files /dev/null and b/internal/terraform/lang/funcs/testdata/icon.png differ diff --git a/internal/terraform/lang/funcs/testdata/list.tmpl b/internal/terraform/lang/funcs/testdata/list.tmpl new file mode 100644 index 00000000..da8f4749 --- /dev/null +++ b/internal/terraform/lang/funcs/testdata/list.tmpl @@ -0,0 +1,3 @@ +%{ for x in list ~} +- ${x} +%{ endfor ~} diff --git a/internal/terraform/lang/funcs/testdata/recursive.tmpl b/internal/terraform/lang/funcs/testdata/recursive.tmpl new file mode 100644 index 00000000..f121b604 --- /dev/null +++ b/internal/terraform/lang/funcs/testdata/recursive.tmpl @@ -0,0 +1 @@ +${templatefile("recursive.tmpl", {})} \ No newline at end of file diff --git a/internal/terraform/lang/funcs/testdata/unreadable/foobar b/internal/terraform/lang/funcs/testdata/unreadable/foobar new file mode 100644 index 00000000..e69de29b diff --git a/internal/terraform/lang/functions.go b/internal/terraform/lang/functions.go new file mode 100644 index 00000000..3d9c372a --- /dev/null +++ b/internal/terraform/lang/functions.go @@ -0,0 +1,200 @@ +package lang + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2/ext/tryfunc" + ctyyaml "github.com/zclconf/go-cty-yaml" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/function/stdlib" + + "github.com/camptocamp/terraboard/internal/terraform/experiments" + "github.com/camptocamp/terraboard/internal/terraform/lang/funcs" +) + +var impureFunctions = []string{ + "bcrypt", + "timestamp", 
+ "uuid", +} + +// Functions returns the set of functions that should be used to when evaluating +// expressions in the receiving scope. +func (s *Scope) Functions() map[string]function.Function { + s.funcsLock.Lock() + if s.funcs == nil { + // Some of our functions are just directly the cty stdlib functions. + // Others are implemented in the subdirectory "funcs" here in this + // repository. New functions should generally start out their lives + // in the "funcs" directory and potentially graduate to cty stdlib + // later if the functionality seems to be something domain-agnostic + // that would be useful to all applications using cty functions. + + s.funcs = map[string]function.Function{ + "abs": stdlib.AbsoluteFunc, + "abspath": funcs.AbsPathFunc, + "alltrue": funcs.AllTrueFunc, + "anytrue": funcs.AnyTrueFunc, + "basename": funcs.BasenameFunc, + "base64decode": funcs.Base64DecodeFunc, + "base64encode": funcs.Base64EncodeFunc, + "base64gzip": funcs.Base64GzipFunc, + "base64sha256": funcs.Base64Sha256Func, + "base64sha512": funcs.Base64Sha512Func, + "bcrypt": funcs.BcryptFunc, + "can": tryfunc.CanFunc, + "ceil": stdlib.CeilFunc, + "chomp": stdlib.ChompFunc, + "cidrhost": funcs.CidrHostFunc, + "cidrnetmask": funcs.CidrNetmaskFunc, + "cidrsubnet": funcs.CidrSubnetFunc, + "cidrsubnets": funcs.CidrSubnetsFunc, + "coalesce": funcs.CoalesceFunc, + "coalescelist": stdlib.CoalesceListFunc, + "compact": stdlib.CompactFunc, + "concat": stdlib.ConcatFunc, + "contains": stdlib.ContainsFunc, + "csvdecode": stdlib.CSVDecodeFunc, + "defaults": s.experimentalFunction(experiments.ModuleVariableOptionalAttrs, funcs.DefaultsFunc), + "dirname": funcs.DirnameFunc, + "distinct": stdlib.DistinctFunc, + "element": stdlib.ElementFunc, + "chunklist": stdlib.ChunklistFunc, + "file": funcs.MakeFileFunc(s.BaseDir, false), + "fileexists": funcs.MakeFileExistsFunc(s.BaseDir), + "fileset": funcs.MakeFileSetFunc(s.BaseDir), + "filebase64": funcs.MakeFileFunc(s.BaseDir, true), + 
"filebase64sha256": funcs.MakeFileBase64Sha256Func(s.BaseDir), + "filebase64sha512": funcs.MakeFileBase64Sha512Func(s.BaseDir), + "filemd5": funcs.MakeFileMd5Func(s.BaseDir), + "filesha1": funcs.MakeFileSha1Func(s.BaseDir), + "filesha256": funcs.MakeFileSha256Func(s.BaseDir), + "filesha512": funcs.MakeFileSha512Func(s.BaseDir), + "flatten": stdlib.FlattenFunc, + "floor": stdlib.FloorFunc, + "format": stdlib.FormatFunc, + "formatdate": stdlib.FormatDateFunc, + "formatlist": stdlib.FormatListFunc, + "indent": stdlib.IndentFunc, + "index": funcs.IndexFunc, // stdlib.IndexFunc is not compatible + "join": stdlib.JoinFunc, + "jsondecode": stdlib.JSONDecodeFunc, + "jsonencode": stdlib.JSONEncodeFunc, + "keys": stdlib.KeysFunc, + "length": funcs.LengthFunc, + "list": funcs.ListFunc, + "log": stdlib.LogFunc, + "lookup": funcs.LookupFunc, + "lower": stdlib.LowerFunc, + "map": funcs.MapFunc, + "matchkeys": funcs.MatchkeysFunc, + "max": stdlib.MaxFunc, + "md5": funcs.Md5Func, + "merge": stdlib.MergeFunc, + "min": stdlib.MinFunc, + "one": funcs.OneFunc, + "parseint": stdlib.ParseIntFunc, + "pathexpand": funcs.PathExpandFunc, + "pow": stdlib.PowFunc, + "range": stdlib.RangeFunc, + "regex": stdlib.RegexFunc, + "regexall": stdlib.RegexAllFunc, + "replace": funcs.ReplaceFunc, + "reverse": stdlib.ReverseListFunc, + "rsadecrypt": funcs.RsaDecryptFunc, + "sensitive": funcs.SensitiveFunc, + "nonsensitive": funcs.NonsensitiveFunc, + "setintersection": stdlib.SetIntersectionFunc, + "setproduct": stdlib.SetProductFunc, + "setsubtract": stdlib.SetSubtractFunc, + "setunion": stdlib.SetUnionFunc, + "sha1": funcs.Sha1Func, + "sha256": funcs.Sha256Func, + "sha512": funcs.Sha512Func, + "signum": stdlib.SignumFunc, + "slice": stdlib.SliceFunc, + "sort": stdlib.SortFunc, + "split": stdlib.SplitFunc, + "strrev": stdlib.ReverseFunc, + "substr": stdlib.SubstrFunc, + "sum": funcs.SumFunc, + "textdecodebase64": funcs.TextDecodeBase64Func, + "textencodebase64": funcs.TextEncodeBase64Func, + 
"timestamp": funcs.TimestampFunc, + "timeadd": stdlib.TimeAddFunc, + "title": stdlib.TitleFunc, + "tostring": funcs.MakeToFunc(cty.String), + "tonumber": funcs.MakeToFunc(cty.Number), + "tobool": funcs.MakeToFunc(cty.Bool), + "toset": funcs.MakeToFunc(cty.Set(cty.DynamicPseudoType)), + "tolist": funcs.MakeToFunc(cty.List(cty.DynamicPseudoType)), + "tomap": funcs.MakeToFunc(cty.Map(cty.DynamicPseudoType)), + "transpose": funcs.TransposeFunc, + "trim": stdlib.TrimFunc, + "trimprefix": stdlib.TrimPrefixFunc, + "trimspace": stdlib.TrimSpaceFunc, + "trimsuffix": stdlib.TrimSuffixFunc, + "try": tryfunc.TryFunc, + "upper": stdlib.UpperFunc, + "urlencode": funcs.URLEncodeFunc, + "uuid": funcs.UUIDFunc, + "uuidv5": funcs.UUIDV5Func, + "values": stdlib.ValuesFunc, + "yamldecode": ctyyaml.YAMLDecodeFunc, + "yamlencode": ctyyaml.YAMLEncodeFunc, + "zipmap": stdlib.ZipmapFunc, + } + + s.funcs["templatefile"] = funcs.MakeTemplateFileFunc(s.BaseDir, func() map[string]function.Function { + // The templatefile function prevents recursive calls to itself + // by copying this map and overwriting the "templatefile" entry. + return s.funcs + }) + + if s.ConsoleMode { + // The type function is only available in terraform console. + s.funcs["type"] = funcs.TypeFunc + } + + if s.PureOnly { + // Force our few impure functions to return unknown so that we + // can defer evaluating them until a later pass. + for _, name := range impureFunctions { + s.funcs[name] = function.Unpredictable(s.funcs[name]) + } + } + } + s.funcsLock.Unlock() + + return s.funcs +} + +// experimentalFunction checks whether the given experiment is enabled for +// the recieving scope. If so, it will return the given function verbatim. +// If not, it will return a placeholder function that just returns an +// error explaining that the function requires the experiment to be enabled. 
+func (s *Scope) experimentalFunction(experiment experiments.Experiment, fn function.Function) function.Function { + if s.activeExperiments.Has(experiment) { + return fn + } + + err := fmt.Errorf( + "this function is experimental and available only when the experiment keyword %s is enabled for the current module", + experiment.Keyword(), + ) + + return function.New(&function.Spec{ + Params: fn.Params(), + VarParam: fn.VarParam(), + Type: func(args []cty.Value) (cty.Type, error) { + return cty.DynamicPseudoType, err + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + // It would be weird to get here because the Type function always + // fails, but we'll return an error here too anyway just to be + // robust. + return cty.DynamicVal, err + }, + }) +} diff --git a/internal/terraform/lang/functions_test.go b/internal/terraform/lang/functions_test.go new file mode 100644 index 00000000..f0b42c96 --- /dev/null +++ b/internal/terraform/lang/functions_test.go @@ -0,0 +1,1230 @@ +package lang + +import ( + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/camptocamp/terraboard/internal/terraform/experiments" + "github.com/camptocamp/terraboard/internal/terraform/lang/marks" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + homedir "github.com/mitchellh/go-homedir" + "github.com/zclconf/go-cty/cty" +) + +// TestFunctions tests that functions are callable through the functionality +// in the langs package, via HCL. +// +// These tests are primarily here to assert that the functions are properly +// registered in the functions table, rather than to test all of the details +// of the functions. Each function should only have one or two tests here, +// since the main set of unit tests for a function should live alongside that +// function either in the "funcs" subdirectory here or over in the cty +// function/stdlib package. 
+// +// One exception to that is we can use this test mechanism to assert common +// patterns that are used in real-world configurations which rely on behaviors +// implemented either in this lang package or in HCL itself, such as automatic +// type conversions. The function unit tests don't cover those things because +// they call directly into the functions. +// +// With that said then, this test function should contain at least one simple +// test case per function registered in the functions table (just to prove +// it really is registered correctly) and possibly a small set of additional +// functions showing real-world use-cases that rely on type conversion +// behaviors. +func TestFunctions(t *testing.T) { + // used in `pathexpand()` test + homePath, err := homedir.Dir() + if err != nil { + t.Fatalf("Error getting home directory: %v", err) + } + + tests := map[string][]struct { + src string + want cty.Value + }{ + // Please maintain this list in alphabetical order by function, with + // a blank line between the group of tests for each function. 
+ + "abs": { + { + `abs(-1)`, + cty.NumberIntVal(1), + }, + }, + + "abspath": { + { + `abspath(".")`, + cty.StringVal((func() string { + cwd, err := os.Getwd() + if err != nil { + panic(err) + } + return filepath.ToSlash(cwd) + })()), + }, + }, + + "alltrue": { + { + `alltrue(["true", true])`, + cty.True, + }, + }, + + "anytrue": { + { + `anytrue([])`, + cty.False, + }, + }, + + "base64decode": { + { + `base64decode("YWJjMTIzIT8kKiYoKSctPUB+")`, + cty.StringVal("abc123!?$*&()'-=@~"), + }, + }, + + "base64encode": { + { + `base64encode("abc123!?$*&()'-=@~")`, + cty.StringVal("YWJjMTIzIT8kKiYoKSctPUB+"), + }, + }, + + "base64gzip": { + { + `base64gzip("test")`, + cty.StringVal("H4sIAAAAAAAA/ypJLS4BAAAA//8BAAD//wx+f9gEAAAA"), + }, + }, + + "base64sha256": { + { + `base64sha256("test")`, + cty.StringVal("n4bQgYhMfWWaL+qgxVrQFaO/TxsrC4Is0V1sFbDwCgg="), + }, + }, + + "base64sha512": { + { + `base64sha512("test")`, + cty.StringVal("7iaw3Ur350mqGo7jwQrpkj9hiYB3Lkc/iBml1JQODbJ6wYX4oOHV+E+IvIh/1nsUNzLDBMxfqa2Ob1f1ACio/w=="), + }, + }, + + "basename": { + { + `basename("testdata/hello.txt")`, + cty.StringVal("hello.txt"), + }, + }, + + "can": { + { + `can(true)`, + cty.True, + }, + { + // Note: "can" only works with expressions that pass static + // validation, because it only gets an opportunity to run in + // that case. The following "works" (captures the error) because + // Terraform understands it as a reference to an attribute + // that does not exist during dynamic evaluation. + // + // "can" doesn't work with references that could never possibly + // be valid and are thus caught during static validation, such + // as an expression like "foo" alone which would be understood + // as an invalid resource reference. 
+ `can({}.baz)`, + cty.False, + }, + }, + + "ceil": { + { + `ceil(1.2)`, + cty.NumberIntVal(2), + }, + }, + + "chomp": { + { + `chomp("goodbye\ncruel\nworld\n")`, + cty.StringVal("goodbye\ncruel\nworld"), + }, + }, + + "chunklist": { + { + `chunklist(["a", "b", "c"], 1)`, + cty.ListVal([]cty.Value{ + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("b"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("c"), + }), + }), + }, + }, + + "cidrhost": { + { + `cidrhost("192.168.1.0/24", 5)`, + cty.StringVal("192.168.1.5"), + }, + }, + + "cidrnetmask": { + { + `cidrnetmask("192.168.1.0/24")`, + cty.StringVal("255.255.255.0"), + }, + }, + + "cidrsubnet": { + { + `cidrsubnet("192.168.2.0/20", 4, 6)`, + cty.StringVal("192.168.6.0/24"), + }, + }, + + "cidrsubnets": { + { + `cidrsubnets("10.0.0.0/8", 8, 8, 16, 8)`, + cty.ListVal([]cty.Value{ + cty.StringVal("10.0.0.0/16"), + cty.StringVal("10.1.0.0/16"), + cty.StringVal("10.2.0.0/24"), + cty.StringVal("10.3.0.0/16"), + }), + }, + }, + + "coalesce": { + { + `coalesce("first", "second", "third")`, + cty.StringVal("first"), + }, + + { + `coalescelist(["first", "second"], ["third", "fourth"])`, + cty.TupleVal([]cty.Value{ + cty.StringVal("first"), cty.StringVal("second"), + }), + }, + }, + + "coalescelist": { + { + `coalescelist(tolist(["a", "b"]), tolist(["c", "d"]))`, + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + }), + }, + { + `coalescelist(["a", "b"], ["c", "d"])`, + cty.TupleVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + }), + }, + }, + + "compact": { + { + `compact(["test", "", "test"])`, + cty.ListVal([]cty.Value{ + cty.StringVal("test"), cty.StringVal("test"), + }), + }, + }, + + "concat": { + { + `concat(["a", ""], ["b", "c"])`, + cty.TupleVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal(""), + cty.StringVal("b"), + cty.StringVal("c"), + }), + }, + }, + + "contains": { + { + `contains(["a", "b"], "a")`, + cty.True, + }, + { // 
Should also work with sets, due to automatic conversion + `contains(toset(["a", "b"]), "a")`, + cty.True, + }, + }, + + "csvdecode": { + { + `csvdecode("a,b,c\n1,2,3\n4,5,6")`, + cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("1"), + "b": cty.StringVal("2"), + "c": cty.StringVal("3"), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("4"), + "b": cty.StringVal("5"), + "c": cty.StringVal("6"), + }), + }), + }, + }, + + "defaults": { + // This function is pretty specialized and so this is mainly + // just a test that it is defined at all. See the function's + // own unit tests for more interesting test cases. + { + `defaults({a: 4}, {a: 5})`, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.NumberIntVal(4), + }), + }, + }, + + "dirname": { + { + `dirname("testdata/hello.txt")`, + cty.StringVal("testdata"), + }, + }, + + "distinct": { + { + `distinct(["a", "b", "a", "b"])`, + cty.ListVal([]cty.Value{ + cty.StringVal("a"), cty.StringVal("b"), + }), + }, + }, + + "element": { + { + `element(["hello"], 0)`, + cty.StringVal("hello"), + }, + }, + + "file": { + { + `file("hello.txt")`, + cty.StringVal("hello!"), + }, + }, + + "fileexists": { + { + `fileexists("hello.txt")`, + cty.BoolVal(true), + }, + }, + + "fileset": { + { + `fileset(".", "*/hello.*")`, + cty.SetVal([]cty.Value{ + cty.StringVal("subdirectory/hello.tmpl"), + cty.StringVal("subdirectory/hello.txt"), + }), + }, + { + `fileset(".", "subdirectory/hello.*")`, + cty.SetVal([]cty.Value{ + cty.StringVal("subdirectory/hello.tmpl"), + cty.StringVal("subdirectory/hello.txt"), + }), + }, + { + `fileset(".", "hello.*")`, + cty.SetVal([]cty.Value{ + cty.StringVal("hello.tmpl"), + cty.StringVal("hello.txt"), + }), + }, + { + `fileset("subdirectory", "hello.*")`, + cty.SetVal([]cty.Value{ + cty.StringVal("hello.tmpl"), + cty.StringVal("hello.txt"), + }), + }, + }, + + "filebase64": { + { + `filebase64("hello.txt")`, + cty.StringVal("aGVsbG8h"), + }, + }, + + 
"filebase64sha256": { + { + `filebase64sha256("hello.txt")`, + cty.StringVal("zgYJL7lI2f+sfRo3bkBLJrdXW8wR7gWkYV/vT+w6MIs="), + }, + }, + + "filebase64sha512": { + { + `filebase64sha512("hello.txt")`, + cty.StringVal("xvgdsOn4IGyXHJ5YJuO6gj/7saOpAPgEdlKov3jqmP38dFhVo4U6Y1Z1RY620arxIJ6I6tLRkjgrXEy91oUOAg=="), + }, + }, + + "filemd5": { + { + `filemd5("hello.txt")`, + cty.StringVal("5a8dd3ad0756a93ded72b823b19dd877"), + }, + }, + + "filesha1": { + { + `filesha1("hello.txt")`, + cty.StringVal("8f7d88e901a5ad3a05d8cc0de93313fd76028f8c"), + }, + }, + + "filesha256": { + { + `filesha256("hello.txt")`, + cty.StringVal("ce06092fb948d9ffac7d1a376e404b26b7575bcc11ee05a4615fef4fec3a308b"), + }, + }, + + "filesha512": { + { + `filesha512("hello.txt")`, + cty.StringVal("c6f81db0e9f8206c971c9e5826e3ba823ffbb1a3a900f8047652a8bf78ea98fdfc745855a3853a635675458eb6d1aaf1209e88ead2d192382b5c4cbdd6850e02"), + }, + }, + + "flatten": { + { + `flatten([["a", "b"], ["c", "d"]])`, + cty.TupleVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + cty.StringVal("d"), + }), + }, + }, + + "floor": { + { + `floor(-1.8)`, + cty.NumberFloatVal(-2), + }, + }, + + "format": { + { + `format("Hello, %s!", "Ander")`, + cty.StringVal("Hello, Ander!"), + }, + }, + + "formatlist": { + { + `formatlist("Hello, %s!", ["Valentina", "Ander", "Olivia", "Sam"])`, + cty.ListVal([]cty.Value{ + cty.StringVal("Hello, Valentina!"), + cty.StringVal("Hello, Ander!"), + cty.StringVal("Hello, Olivia!"), + cty.StringVal("Hello, Sam!"), + }), + }, + }, + + "formatdate": { + { + `formatdate("DD MMM YYYY hh:mm ZZZ", "2018-01-04T23:12:01Z")`, + cty.StringVal("04 Jan 2018 23:12 UTC"), + }, + }, + + "indent": { + { + fmt.Sprintf("indent(4, %#v)", Poem), + cty.StringVal("Fleas:\n Adam\n Had'em\n \n E.E. 
Cummings"), + }, + }, + + "index": { + { + `index(["a", "b", "c"], "a")`, + cty.NumberIntVal(0), + }, + }, + + "join": { + { + `join(" ", ["Hello", "World"])`, + cty.StringVal("Hello World"), + }, + }, + + "jsondecode": { + { + `jsondecode("{\"hello\": \"world\"}")`, + cty.ObjectVal(map[string]cty.Value{ + "hello": cty.StringVal("world"), + }), + }, + }, + + "jsonencode": { + { + `jsonencode({"hello"="world"})`, + cty.StringVal("{\"hello\":\"world\"}"), + }, + // We are intentionally choosing to escape <, >, and & characters + // to preserve backwards compatibility with Terraform 0.11 + { + `jsonencode({"hello"=""})`, + cty.StringVal("{\"hello\":\"\\u003ccats \\u0026 kittens\\u003e\"}"), + }, + }, + + "keys": { + { + `keys({"hello"=1, "goodbye"=42})`, + cty.TupleVal([]cty.Value{ + cty.StringVal("goodbye"), + cty.StringVal("hello"), + }), + }, + }, + + "length": { + { + `length(["the", "quick", "brown", "bear"])`, + cty.NumberIntVal(4), + }, + }, + + "list": { + // There are intentionally no test cases for "list" because + // it is a stub that always returns an error. + }, + + "log": { + { + `log(1, 10)`, + cty.NumberFloatVal(0), + }, + }, + + "lookup": { + { + `lookup({hello=1, goodbye=42}, "goodbye")`, + cty.NumberIntVal(42), + }, + }, + + "lower": { + { + `lower("HELLO")`, + cty.StringVal("hello"), + }, + }, + + "map": { + // There are intentionally no test cases for "map" because + // it is a stub that always returns an error. 
+ }, + + "matchkeys": { + { + `matchkeys(["a", "b", "c"], ["ref1", "ref2", "ref3"], ["ref1"])`, + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + }), + }, + { // mixing types in searchset + `matchkeys(["a", "b", "c"], [1, 2, 3], [1, "3"])`, + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("c"), + }), + }, + }, + + "max": { + { + `max(12, 54, 3)`, + cty.NumberIntVal(54), + }, + }, + + "md5": { + { + `md5("tada")`, + cty.StringVal("ce47d07243bb6eaf5e1322c81baf9bbf"), + }, + }, + + "merge": { + { + `merge({"a"="b"}, {"c"="d"})`, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("b"), + "c": cty.StringVal("d"), + }), + }, + }, + + "min": { + { + `min(12, 54, 3)`, + cty.NumberIntVal(3), + }, + }, + + "nonsensitive": { + { + // Due to how this test is set up we have no way to get + // a sensitive value other than to generate one with + // another function, so this is a bit odd but does still + // meet the goal of verifying that the "nonsensitive" + // function is correctly registered. 
+ `nonsensitive(sensitive(1))`, + cty.NumberIntVal(1), + }, + }, + + "one": { + { + `one([])`, + cty.NullVal(cty.DynamicPseudoType), + }, + { + `one([true])`, + cty.True, + }, + }, + + "parseint": { + { + `parseint("100", 10)`, + cty.NumberIntVal(100), + }, + }, + + "pathexpand": { + { + `pathexpand("~/test-file")`, + cty.StringVal(filepath.Join(homePath, "test-file")), + }, + }, + + "pow": { + { + `pow(1,0)`, + cty.NumberFloatVal(1), + }, + }, + + "range": { + { + `range(3)`, + cty.ListVal([]cty.Value{ + cty.NumberIntVal(0), + cty.NumberIntVal(1), + cty.NumberIntVal(2), + }), + }, + { + `range(1, 4)`, + cty.ListVal([]cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(2), + cty.NumberIntVal(3), + }), + }, + { + `range(1, 8, 2)`, + cty.ListVal([]cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(3), + cty.NumberIntVal(5), + cty.NumberIntVal(7), + }), + }, + }, + + "regex": { + { + `regex("(\\d+)([a-z]+)", "aaa111bbb222")`, + cty.TupleVal([]cty.Value{cty.StringVal("111"), cty.StringVal("bbb")}), + }, + }, + + "regexall": { + { + `regexall("(\\d+)([a-z]+)", "...111aaa222bbb...")`, + cty.ListVal([]cty.Value{ + cty.TupleVal([]cty.Value{cty.StringVal("111"), cty.StringVal("aaa")}), + cty.TupleVal([]cty.Value{cty.StringVal("222"), cty.StringVal("bbb")}), + }), + }, + }, + + "replace": { + { + `replace("hello", "hel", "bel")`, + cty.StringVal("bello"), + }, + }, + + "reverse": { + { + `reverse(["a", true, 0])`, + cty.TupleVal([]cty.Value{cty.Zero, cty.True, cty.StringVal("a")}), + }, + }, + + "rsadecrypt": { + { + fmt.Sprintf("rsadecrypt(%#v, %#v)", CipherBase64, PrivateKey), + cty.StringVal("message"), + }, + }, + + "sensitive": { + { + `sensitive(1)`, + cty.NumberIntVal(1).Mark(marks.Sensitive), + }, + }, + + "setintersection": { + { + `setintersection(["a", "b"], ["b", "c"], ["b", "d"])`, + cty.SetVal([]cty.Value{ + cty.StringVal("b"), + }), + }, + }, + + "setproduct": { + { + `setproduct(["development", "staging", "production"], ["app1", "app2"])`, + 
cty.ListVal([]cty.Value{ + cty.TupleVal([]cty.Value{cty.StringVal("development"), cty.StringVal("app1")}), + cty.TupleVal([]cty.Value{cty.StringVal("development"), cty.StringVal("app2")}), + cty.TupleVal([]cty.Value{cty.StringVal("staging"), cty.StringVal("app1")}), + cty.TupleVal([]cty.Value{cty.StringVal("staging"), cty.StringVal("app2")}), + cty.TupleVal([]cty.Value{cty.StringVal("production"), cty.StringVal("app1")}), + cty.TupleVal([]cty.Value{cty.StringVal("production"), cty.StringVal("app2")}), + }), + }, + }, + + "setsubtract": { + { + `setsubtract(["a", "b", "c"], ["a", "c"])`, + cty.SetVal([]cty.Value{ + cty.StringVal("b"), + }), + }, + }, + + "setunion": { + { + `setunion(["a", "b"], ["b", "c"], ["d"])`, + cty.SetVal([]cty.Value{ + cty.StringVal("d"), + cty.StringVal("b"), + cty.StringVal("a"), + cty.StringVal("c"), + }), + }, + }, + + "sha1": { + { + `sha1("test")`, + cty.StringVal("a94a8fe5ccb19ba61c4c0873d391e987982fbbd3"), + }, + }, + + "sha256": { + { + `sha256("test")`, + cty.StringVal("9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08"), + }, + }, + + "sha512": { + { + `sha512("test")`, + cty.StringVal("ee26b0dd4af7e749aa1a8ee3c10ae9923f618980772e473f8819a5d4940e0db27ac185f8a0e1d5f84f88bc887fd67b143732c304cc5fa9ad8e6f57f50028a8ff"), + }, + }, + + "signum": { + { + `signum(12)`, + cty.NumberFloatVal(1), + }, + }, + + "slice": { + { + // force a list type here for testing + `slice(tolist(["a", "b", "c", "d"]), 1, 3)`, + cty.ListVal([]cty.Value{ + cty.StringVal("b"), cty.StringVal("c"), + }), + }, + { + `slice(["a", "b", 3, 4], 1, 3)`, + cty.TupleVal([]cty.Value{ + cty.StringVal("b"), cty.NumberIntVal(3), + }), + }, + }, + + "sort": { + { + `sort(["banana", "apple"])`, + cty.ListVal([]cty.Value{ + cty.StringVal("apple"), + cty.StringVal("banana"), + }), + }, + }, + + "split": { + { + `split(" ", "Hello World")`, + cty.ListVal([]cty.Value{ + cty.StringVal("Hello"), + cty.StringVal("World"), + }), + }, + }, + + "strrev": { + { + 
`strrev("hello world")`, + cty.StringVal("dlrow olleh"), + }, + }, + + "substr": { + { + `substr("hello world", 1, 4)`, + cty.StringVal("ello"), + }, + }, + + "sum": { + { + `sum([2340.5,10,3])`, + cty.NumberFloatVal(2353.5), + }, + }, + + "textdecodebase64": { + { + `textdecodebase64("dABlAHMAdAA=", "UTF-16LE")`, + cty.StringVal("test"), + }, + }, + + "textencodebase64": { + { + `textencodebase64("test", "UTF-16LE")`, + cty.StringVal("dABlAHMAdAA="), + }, + }, + + "templatefile": { + { + `templatefile("hello.tmpl", {name = "Jodie"})`, + cty.StringVal("Hello, Jodie!"), + }, + }, + + "timeadd": { + { + `timeadd("2017-11-22T00:00:00Z", "1s")`, + cty.StringVal("2017-11-22T00:00:01Z"), + }, + }, + + "title": { + { + `title("hello")`, + cty.StringVal("Hello"), + }, + }, + + "tobool": { + { + `tobool("false")`, + cty.False, + }, + }, + + "tolist": { + { + `tolist(["a", "b", "c"])`, + cty.ListVal([]cty.Value{ + cty.StringVal("a"), cty.StringVal("b"), cty.StringVal("c"), + }), + }, + }, + + "tomap": { + { + `tomap({"a" = 1, "b" = 2})`, + cty.MapVal(map[string]cty.Value{ + "a": cty.NumberIntVal(1), + "b": cty.NumberIntVal(2), + }), + }, + }, + + "tonumber": { + { + `tonumber("42")`, + cty.NumberIntVal(42), + }, + }, + + "toset": { + { + `toset(["a", "b", "c"])`, + cty.SetVal([]cty.Value{ + cty.StringVal("a"), cty.StringVal("b"), cty.StringVal("c"), + }), + }, + }, + + "tostring": { + { + `tostring("a")`, + cty.StringVal("a"), + }, + }, + + "transpose": { + { + `transpose({"a" = ["1", "2"], "b" = ["2", "3"]})`, + cty.MapVal(map[string]cty.Value{ + "1": cty.ListVal([]cty.Value{cty.StringVal("a")}), + "2": cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")}), + "3": cty.ListVal([]cty.Value{cty.StringVal("b")}), + }), + }, + }, + + "trim": { + { + `trim("?!hello?!", "!?")`, + cty.StringVal("hello"), + }, + }, + + "trimprefix": { + { + `trimprefix("helloworld", "hello")`, + cty.StringVal("world"), + }, + }, + + "trimspace": { + { + `trimspace(" hello ")`, + 
cty.StringVal("hello"), + }, + }, + + "trimsuffix": { + { + `trimsuffix("helloworld", "world")`, + cty.StringVal("hello"), + }, + }, + + "try": { + { + // Note: "try" only works with expressions that pass static + // validation, because it only gets an opportunity to run in + // that case. The following "works" (captures the error) because + // Terraform understands it as a reference to an attribute + // that does not exist during dynamic evaluation. + // + // "try" doesn't work with references that could never possibly + // be valid and are thus caught during static validation, such + // as an expression like "foo" alone which would be understood + // as an invalid resource reference. That's okay because this + // function exists primarily to ease access to dynamically-typed + // structures that Terraform can't statically validate by + // definition. + `try({}.baz, "fallback")`, + cty.StringVal("fallback"), + }, + { + `try("fallback")`, + cty.StringVal("fallback"), + }, + }, + + "upper": { + { + `upper("hello")`, + cty.StringVal("HELLO"), + }, + }, + + "urlencode": { + { + `urlencode("foo:bar@localhost?foo=bar&bar=baz")`, + cty.StringVal("foo%3Abar%40localhost%3Ffoo%3Dbar%26bar%3Dbaz"), + }, + }, + + "uuidv5": { + { + `uuidv5("dns", "tada")`, + cty.StringVal("faa898db-9b9d-5b75-86a9-149e7bb8e3b8"), + }, + { + `uuidv5("url", "tada")`, + cty.StringVal("2c1ff6b4-211f-577e-94de-d978b0caa16e"), + }, + { + `uuidv5("oid", "tada")`, + cty.StringVal("61eeea26-5176-5288-87fc-232d6ed30d2f"), + }, + { + `uuidv5("x500", "tada")`, + cty.StringVal("7e12415e-f7c9-57c3-9e43-52dc9950d264"), + }, + { + `uuidv5("6ba7b810-9dad-11d1-80b4-00c04fd430c8", "tada")`, + cty.StringVal("faa898db-9b9d-5b75-86a9-149e7bb8e3b8"), + }, + }, + + "values": { + { + `values({"hello"="world", "what's"="up"})`, + cty.TupleVal([]cty.Value{ + cty.StringVal("world"), + cty.StringVal("up"), + }), + }, + }, + + "yamldecode": { + { + `yamldecode("true")`, + cty.True, + }, + { + `yamldecode("key: 0ba")`, + 
cty.ObjectVal(map[string]cty.Value{ + "key": cty.StringVal("0ba"), + }), + }, + }, + + "yamlencode": { + { + `yamlencode(["foo", "bar", true])`, + cty.StringVal("- \"foo\"\n- \"bar\"\n- true\n"), + }, + { + `yamlencode({a = "b", c = "d"})`, + cty.StringVal("\"a\": \"b\"\n\"c\": \"d\"\n"), + }, + { + `yamlencode(true)`, + // the ... here is an "end of document" marker, produced for implied primitive types only + cty.StringVal("true\n...\n"), + }, + }, + + "zipmap": { + { + `zipmap(["hello", "bar"], ["world", "baz"])`, + cty.ObjectVal(map[string]cty.Value{ + "hello": cty.StringVal("world"), + "bar": cty.StringVal("baz"), + }), + }, + }, + } + + experimentalFuncs := map[string]experiments.Experiment{} + experimentalFuncs["defaults"] = experiments.ModuleVariableOptionalAttrs + + t.Run("all functions are tested", func(t *testing.T) { + data := &dataForTests{} // no variables available; we only need literals here + scope := &Scope{ + Data: data, + BaseDir: "./testdata/functions-test", // for the functions that read from the filesystem + } + + // Check that there is at least one test case for each function, omitting + // those functions that do not return consistent values + allFunctions := scope.Functions() + + // TODO: we can test the impure functions partially by configuring the scope + // with PureOnly: true and then verify that they return unknown values of a + // suitable type. + for _, impureFunc := range impureFunctions { + delete(allFunctions, impureFunc) + } + for f := range scope.Functions() { + if _, ok := tests[f]; !ok { + t.Errorf("Missing test for function %s\n", f) + } + } + }) + + for funcName, funcTests := range tests { + t.Run(funcName, func(t *testing.T) { + + // prepareScope starts as a no-op, but if a function is marked as + // experimental in our experimentalFuncs table above then we'll + // reassign this to be a function that activates the appropriate + // experiment. 
+ prepareScope := func(t *testing.T, scope *Scope) {} + + if experiment, isExperimental := experimentalFuncs[funcName]; isExperimental { + // First, we'll run all of the tests without the experiment + // enabled to see that they do actually fail in that case. + for _, test := range funcTests { + testName := fmt.Sprintf("experimental(%s)", test.src) + t.Run(testName, func(t *testing.T) { + data := &dataForTests{} // no variables available; we only need literals here + scope := &Scope{ + Data: data, + BaseDir: "./testdata/functions-test", // for the functions that read from the filesystem + } + + expr, parseDiags := hclsyntax.ParseExpression([]byte(test.src), "test.hcl", hcl.Pos{Line: 1, Column: 1}) + if parseDiags.HasErrors() { + for _, diag := range parseDiags { + t.Error(diag.Error()) + } + return + } + + _, diags := scope.EvalExpr(expr, cty.DynamicPseudoType) + if !diags.HasErrors() { + t.Errorf("experimental function %q succeeded without its experiment %s enabled\nexpr: %s", funcName, experiment.Keyword(), test.src) + } + }) + } + + // Now make the experiment active in the scope so that the + // function will actually work when we test it below. 
+ prepareScope = func(t *testing.T, scope *Scope) { + t.Helper() + t.Logf("activating experiment %s to test %q", experiment.Keyword(), funcName) + experimentsSet := experiments.NewSet() + experimentsSet.Add(experiment) + scope.SetActiveExperiments(experimentsSet) + } + } + + for _, test := range funcTests { + t.Run(test.src, func(t *testing.T) { + data := &dataForTests{} // no variables available; we only need literals here + scope := &Scope{ + Data: data, + BaseDir: "./testdata/functions-test", // for the functions that read from the filesystem + } + prepareScope(t, scope) + + expr, parseDiags := hclsyntax.ParseExpression([]byte(test.src), "test.hcl", hcl.Pos{Line: 1, Column: 1}) + if parseDiags.HasErrors() { + for _, diag := range parseDiags { + t.Error(diag.Error()) + } + return + } + + got, diags := scope.EvalExpr(expr, cty.DynamicPseudoType) + if diags.HasErrors() { + for _, diag := range diags { + t.Errorf("%s: %s", diag.Description().Summary, diag.Description().Detail) + } + return + } + + if !test.want.RawEquals(got) { + t.Errorf("wrong result\nexpr: %s\ngot: %#v\nwant: %#v", test.src, got, test.want) + } + }) + } + }) + } +} + +const ( + CipherBase64 = "eczGaDhXDbOFRZGhjx2etVzWbRqWDlmq0bvNt284JHVbwCgObiuyX9uV0LSAMY707IEgMkExJqXmsB4OWKxvB7epRB9G/3+F+pcrQpODlDuL9oDUAsa65zEpYF0Wbn7Oh7nrMQncyUPpyr9WUlALl0gRWytOA23S+y5joa4M34KFpawFgoqTu/2EEH4Xl1zo+0fy73fEto+nfkUY+meuyGZ1nUx/+DljP7ZqxHBFSlLODmtuTMdswUbHbXbWneW51D7Jm7xB8nSdiA2JQNK5+Sg5x8aNfgvFTt/m2w2+qpsyFa5Wjeu6fZmXSl840CA07aXbk9vN4I81WmJyblD/ZA==" + PrivateKey = ` +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAgUElV5mwqkloIrM8ZNZ72gSCcnSJt7+/Usa5G+D15YQUAdf9 +c1zEekTfHgDP+04nw/uFNFaE5v1RbHaPxhZYVg5ZErNCa/hzn+x10xzcepeS3KPV +Xcxae4MR0BEegvqZqJzN9loXsNL/c3H/B+2Gle3hTxjlWFb3F5qLgR+4Mf4ruhER +1v6eHQa/nchi03MBpT4UeJ7MrL92hTJYLdpSyCqmr8yjxkKJDVC2uRrr+sTSxfh7 +r6v24u/vp/QTmBIAlNPgadVAZw17iNNb7vjV7Gwl/5gHXonCUKURaV++dBNLrHIZ +pqcAM8wHRph8mD1EfL9hsz77pHewxolBATV+7QIDAQABAoIBAC1rK+kFW3vrAYm3 
++8/fQnQQw5nec4o6+crng6JVQXLeH32qXShNf8kLLG/Jj0vaYcTPPDZw9JCKkTMQ +0mKj9XR/5DLbBMsV6eNXXuvJJ3x4iKW5eD9WkLD4FKlNarBRyO7j8sfPTqXW7uat +NxWdFH7YsSRvNh/9pyQHLWA5OituidMrYbc3EUx8B1GPNyJ9W8Q8znNYLfwYOjU4 +Wv1SLE6qGQQH9Q0WzA2WUf8jklCYyMYTIywAjGb8kbAJlKhmj2t2Igjmqtwt1PYc +pGlqbtQBDUiWXt5S4YX/1maIQ/49yeNUajjpbJiH3DbhJbHwFTzP3pZ9P9GHOzlG +kYR+wSECgYEAw/Xida8kSv8n86V3qSY/I+fYQ5V+jDtXIE+JhRnS8xzbOzz3v0WS +Oo5H+o4nJx5eL3Ghb3Gcm0Jn46dHrxinHbm+3RjXv/X6tlbxIYjRSQfHOTSMCTvd +qcliF5vC6RCLXuc7R+IWR1Ky6eDEZGtrvt3DyeYABsp9fRUFR/6NluUCgYEAqNsw +1aSl7WJa27F0DoJdlU9LWerpXcazlJcIdOz/S9QDmSK3RDQTdqfTxRmrxiYI9LEs +mkOkvzlnnOBMpnZ3ZOU5qIRfprecRIi37KDAOHWGnlC0EWGgl46YLb7/jXiWf0AG +Y+DfJJNd9i6TbIDWu8254/erAS6bKMhW/3q7f2kCgYAZ7Id/BiKJAWRpqTRBXlvw +BhXoKvjI2HjYP21z/EyZ+PFPzur/lNaZhIUlMnUfibbwE9pFggQzzf8scM7c7Sf+ +mLoVSdoQ/Rujz7CqvQzi2nKSsM7t0curUIb3lJWee5/UeEaxZcmIufoNUrzohAWH +BJOIPDM4ssUTLRq7wYM9uQKBgHCBau5OP8gE6mjKuXsZXWUoahpFLKwwwmJUp2vQ +pOFPJ/6WZOlqkTVT6QPAcPUbTohKrF80hsZqZyDdSfT3peFx4ZLocBrS56m6NmHR +UYHMvJ8rQm76T1fryHVidz85g3zRmfBeWg8yqT5oFg4LYgfLsPm1gRjOhs8LfPvI +OLlRAoGBAIZ5Uv4Z3s8O7WKXXUe/lq6j7vfiVkR1NW/Z/WLKXZpnmvJ7FgxN4e56 +RXT7GwNQHIY8eDjDnsHxzrxd+raOxOZeKcMHj3XyjCX3NHfTscnsBPAGYpY/Wxzh +T8UYnFu6RzkixElTf2rseEav7rkdKkI3LAeIZy7B0HulKKsmqVQ7 +-----END RSA PRIVATE KEY----- +` + Poem = `Fleas: +Adam +Had'em + +E.E. 
Cummings` +) diff --git a/internal/terraform/lang/globalref/analyzer.go b/internal/terraform/lang/globalref/analyzer.go new file mode 100644 index 00000000..11b68ab6 --- /dev/null +++ b/internal/terraform/lang/globalref/analyzer.go @@ -0,0 +1,68 @@ +package globalref + +import ( + "fmt" + + "github.com/camptocamp/terraboard/internal/terraform/addrs" + "github.com/camptocamp/terraboard/internal/terraform/configs" + "github.com/camptocamp/terraboard/internal/terraform/providers" +) + +// Analyzer is the main component of this package, serving as a container for +// various state that the analysis algorithms depend on either for their core +// functionality or for producing results more quickly. +// +// Global reference analysis is currently intended only for "best effort" +// use-cases related to giving hints to the user or tailoring UI output. +// Avoid using it for anything that would cause changes to the analyzer being +// considered a breaking change under the v1 compatibility promises, because +// we expect to continue to refine and evolve these rules over time in ways +// that may cause us to detect either more or fewer references than today. +// Typically we will conservatively return more references than would be +// necessary dynamically, but that isn't guaranteed for all situations. +// +// In particular, we currently typically don't distinguish between multiple +// instances of the same module, and so we overgeneralize references from +// one instance of a module as references from the same location in all +// instances of that module. We may make this more precise in future, which +// would then remove various detected references from the analysis results. +// +// Each Analyzer works with a particular configs.Config object which it assumes +// represents the root module of a configuration. 
Config objects are typically +// immutable by convention anyway, but it's particularly important not to +// modify a configuration while it's attached to a live Analyzer, because +// the Analyzer contains caches derived from data in the configuration tree. +type Analyzer struct { + cfg *configs.Config + providerSchemas map[addrs.Provider]*providers.Schemas +} + +// NewAnalyzer constructs a new analyzer bound to the given configuration and +// provider schemas. +// +// The given object must represent a root module, or this function will panic. +// +// The given provider schemas must cover at least all of the providers used +// in the given configuration. If not then analysis results will be silently +// incomplete for any decision that requires checking schema. +func NewAnalyzer(cfg *configs.Config, providerSchemas map[addrs.Provider]*providers.Schemas) *Analyzer { + if !cfg.Path.IsRoot() { + panic(fmt.Sprintf("constructing an Analyzer with non-root module %s", cfg.Path)) + } + + ret := &Analyzer{ + cfg: cfg, + providerSchemas: providerSchemas, + } + return ret +} + +// ModuleConfig retrieves a module configuration from the configuration the +// analyzer belongs to, or nil if there is no module with the given address. 
+func (a *Analyzer) ModuleConfig(addr addrs.ModuleInstance) *configs.Module { + modCfg := a.cfg.DescendentForInstance(addr) + if modCfg == nil { + return nil + } + return modCfg.Module +} diff --git a/internal/terraform/lang/globalref/analyzer_contributing_resources.go b/internal/terraform/lang/globalref/analyzer_contributing_resources.go new file mode 100644 index 00000000..cd27149f --- /dev/null +++ b/internal/terraform/lang/globalref/analyzer_contributing_resources.go @@ -0,0 +1,130 @@ +package globalref + +import ( + "sort" + + "github.com/camptocamp/terraboard/internal/terraform/addrs" +) + +// ContributingResources analyzes all of the given references and +// for each one tries to walk backwards through any named values to find all +// resources whose values contributed either directly or indirectly to any of +// them. +// +// This is a wrapper around ContributingResourceReferences which simplifies +// the result to only include distinct resource addresses, not full references. +// If the configuration includes several different references to different +// parts of a resource, ContributingResources will not preserve that detail. +func (a *Analyzer) ContributingResources(refs ...Reference) []addrs.AbsResource { + retRefs := a.ContributingResourceReferences(refs...) + if len(retRefs) == 0 { + return nil + } + + uniq := make(map[string]addrs.AbsResource, len(refs)) + for _, ref := range retRefs { + if addr, ok := resourceForAddr(ref.LocalRef.Subject); ok { + moduleAddr := ref.ModuleAddr() + absAddr := addr.Absolute(moduleAddr) + uniq[absAddr.String()] = absAddr + } + } + ret := make([]addrs.AbsResource, 0, len(uniq)) + for _, addr := range uniq { + ret = append(ret, addr) + } + sort.Slice(ret, func(i, j int) bool { + // We only have a sorting function for resource _instances_, but + // it'll do well enough if we just pretend we have no-key instances. 
+ return ret[i].Instance(addrs.NoKey).Less(ret[j].Instance(addrs.NoKey)) + }) + return ret +} + +// ContributingResourceReferences analyzes all of the given references and +// for each one tries to walk backwards through any named values to find all +// references to resource attributes that contributed either directly or +// indirectly to any of them. +// +// This is a global operation that can be potentially quite expensive for +// complex configurations. +func (a *Analyzer) ContributingResourceReferences(refs ...Reference) []Reference { + // Our methodology here is to keep digging through MetaReferences + // until we've visited everything we encounter directly or indirectly, + // and keep track of any resources we find along the way. + + // We'll aggregate our result here, using the string representations of + // the resources as keys to avoid returning the same one more than once. + found := make(map[referenceAddrKey]Reference) + + // We might encounter the same object multiple times as we walk, + // but we won't learn anything more by traversing them again and so we'll + // just skip them instead. + visitedObjects := make(map[referenceAddrKey]struct{}) + + // A queue of objects we still need to visit. + // Note that if we find multiple references to the same object then we'll + // just arbitrary choose any one of them, because for our purposes here + // it's immaterial which reference we actually followed. + pendingObjects := make(map[referenceAddrKey]Reference) + + // Initial state: identify any directly-mentioned resources and + // queue up any named values we refer to. 
+ for _, ref := range refs { + if _, ok := resourceForAddr(ref.LocalRef.Subject); ok { + found[ref.addrKey()] = ref + } + pendingObjects[ref.addrKey()] = ref + } + + for len(pendingObjects) > 0 { + // Note: we modify this map while we're iterating over it, which means + // that anything we add might be either visited within a later + // iteration of the inner loop or in a later iteration of the outer + // loop, but we get the correct result either way because we keep + // working until we've fully depleted the queue. + for key, ref := range pendingObjects { + delete(pendingObjects, key) + + // We do this _before_ the visit below just in case this is an + // invalid config with a self-referential local value, in which + // case we'll just silently ignore the self reference for our + // purposes here, and thus still eventually converge (albeit + // with an incomplete answer). + visitedObjects[key] = struct{}{} + + moreRefs := a.MetaReferences(ref) + for _, newRef := range moreRefs { + if _, ok := resourceForAddr(newRef.LocalRef.Subject); ok { + found[newRef.addrKey()] = newRef + } + + newKey := newRef.addrKey() + if _, visited := visitedObjects[newKey]; !visited { + pendingObjects[newKey] = newRef + } + } + } + } + + if len(found) == 0 { + return nil + } + + ret := make([]Reference, 0, len(found)) + for _, ref := range found { + ret = append(ret, ref) + } + return ret +} + +func resourceForAddr(addr addrs.Referenceable) (addrs.Resource, bool) { + switch addr := addr.(type) { + case addrs.Resource: + return addr, true + case addrs.ResourceInstance: + return addr.Resource, true + default: + return addrs.Resource{}, false + } +} diff --git a/internal/terraform/lang/globalref/analyzer_meta_references.go b/internal/terraform/lang/globalref/analyzer_meta_references.go new file mode 100644 index 00000000..b61d35c7 --- /dev/null +++ b/internal/terraform/lang/globalref/analyzer_meta_references.go @@ -0,0 +1,606 @@ +package globalref + +import ( + 
"github.com/camptocamp/terraboard/internal/terraform/addrs" + "github.com/camptocamp/terraboard/internal/terraform/configs/configschema" + "github.com/camptocamp/terraboard/internal/terraform/lang" + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/gocty" +) + +// MetaReferences inspects the configuration to find the references contained +// within the most specific object that the given address refers to. +// +// This finds only the direct references in that object, not any indirect +// references from those. This is a building block for some other Analyzer +// functions that can walk through multiple levels of reference. +// +// If the given reference refers to something that doesn't exist in the +// configuration we're analyzing then MetaReferences will return no +// meta-references at all, which is indistinguishable from an existing +// object that doesn't refer to anything. +func (a *Analyzer) MetaReferences(ref Reference) []Reference { + // This function is aiming to encapsulate the fact that a reference + // is actually quite a complex notion which includes both a specific + // object the reference is to, where each distinct object type has + // a very different representation in the configuration, and then + // also potentially an attribute or block within the definition of that + // object. Our goal is to make all of these different situations appear + // mostly the same to the caller, in that all of them can be reduced to + // a set of references regardless of which expression or expressions we + // derive those from. + + moduleAddr := ref.ModuleAddr() + remaining := ref.LocalRef.Remaining + + // Our first task then is to select an appropriate implementation based + // on which address type the reference refers to. 
+ switch targetAddr := ref.LocalRef.Subject.(type) { + case addrs.InputVariable: + return a.metaReferencesInputVariable(moduleAddr, targetAddr, remaining) + case addrs.LocalValue: + return a.metaReferencesLocalValue(moduleAddr, targetAddr, remaining) + case addrs.ModuleCallInstanceOutput: + return a.metaReferencesOutputValue(moduleAddr, targetAddr, remaining) + case addrs.ModuleCallInstance: + return a.metaReferencesModuleCall(moduleAddr, targetAddr, remaining) + case addrs.ModuleCall: + // TODO: It isn't really correct to say that a reference to a module + // call is a reference to its no-key instance. Really what we want to + // say here is that it's a reference to _all_ instances, or to an + // instance with an unknown key, but we don't have any representation + // of that. For the moment it's pretty immaterial since most of our + // other analysis ignores instance keys anyway, but maybe we'll revisit + // this later to distinguish these two cases better. + return a.metaReferencesModuleCall(moduleAddr, targetAddr.Instance(addrs.NoKey), remaining) + case addrs.CountAttr, addrs.ForEachAttr: + if resourceAddr, ok := ref.ResourceInstance(); ok { + return a.metaReferencesCountOrEach(resourceAddr.ContainingResource()) + } + return nil + case addrs.ResourceInstance: + return a.metaReferencesResourceInstance(moduleAddr, targetAddr, remaining) + case addrs.Resource: + // TODO: It isn't really correct to say that a reference to a resource + // is a reference to its no-key instance. Really what we want to say + // here is that it's a reference to _all_ instances, or to an instance + // with an unknown key, but we don't have any representation of that. + // For the moment it's pretty immaterial since most of our other + // analysis ignores instance keys anyway, but maybe we'll revisit this + // later to distinguish these two cases better. 
+ return a.metaReferencesResourceInstance(moduleAddr, targetAddr.Instance(addrs.NoKey), remaining) + default: + // For anything we don't explicitly support we'll just return no + // references. This includes the reference types that don't really + // refer to configuration objects at all, like "path.module", + // and so which cannot possibly generate any references. + return nil + } +} + +func (a *Analyzer) metaReferencesInputVariable(calleeAddr addrs.ModuleInstance, addr addrs.InputVariable, remain hcl.Traversal) []Reference { + if calleeAddr.IsRoot() { + // A root module variable definition can never refer to anything, + // because it conceptually exists outside of any module. + return nil + } + + callerAddr, callAddr := calleeAddr.Call() + + // We need to find the module call inside the caller module. + callerCfg := a.ModuleConfig(callerAddr) + if callerCfg == nil { + return nil + } + call := callerCfg.ModuleCalls[callAddr.Name] + if call == nil { + return nil + } + + // Now we need to look for an attribute matching the variable name inside + // the module block body. + body := call.Config + schema := &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + {Name: addr.Name}, + }, + } + // We don't check for errors here because we'll make a best effort to + // analyze whatever partial result HCL is able to extract. + content, _, _ := body.PartialContent(schema) + attr := content.Attributes[addr.Name] + if attr == nil { + return nil + } + refs, _ := lang.ReferencesInExpr(attr.Expr) + return absoluteRefs(callerAddr, refs) +} + +func (a *Analyzer) metaReferencesOutputValue(callerAddr addrs.ModuleInstance, addr addrs.ModuleCallInstanceOutput, remain hcl.Traversal) []Reference { + calleeAddr := callerAddr.Child(addr.Call.Call.Name, addr.Call.Key) + + // We need to find the output value declaration inside the callee module. 
+ calleeCfg := a.ModuleConfig(calleeAddr) + if calleeCfg == nil { + return nil + } + + oc := calleeCfg.Outputs[addr.Name] + if oc == nil { + return nil + } + + // We don't check for errors here because we'll make a best effort to + // analyze whatever partial result HCL is able to extract. + refs, _ := lang.ReferencesInExpr(oc.Expr) + return absoluteRefs(calleeAddr, refs) +} + +func (a *Analyzer) metaReferencesLocalValue(moduleAddr addrs.ModuleInstance, addr addrs.LocalValue, remain hcl.Traversal) []Reference { + modCfg := a.ModuleConfig(moduleAddr) + if modCfg == nil { + return nil + } + + local := modCfg.Locals[addr.Name] + if local == nil { + return nil + } + + // We don't check for errors here because we'll make a best effort to + // analyze whatever partial result HCL is able to extract. + refs, _ := lang.ReferencesInExpr(local.Expr) + return absoluteRefs(moduleAddr, refs) +} + +func (a *Analyzer) metaReferencesModuleCall(callerAddr addrs.ModuleInstance, addr addrs.ModuleCallInstance, remain hcl.Traversal) []Reference { + calleeAddr := callerAddr.Child(addr.Call.Name, addr.Key) + + // What we're really doing here is just rolling up all of the references + // from all of this module's output values. + calleeCfg := a.ModuleConfig(calleeAddr) + if calleeCfg == nil { + return nil + } + + var ret []Reference + for name := range calleeCfg.Outputs { + outputAddr := addrs.ModuleCallInstanceOutput{ + Call: addr, + Name: name, + } + moreRefs := a.metaReferencesOutputValue(callerAddr, outputAddr, nil) + ret = append(ret, moreRefs...) 
+ } + return ret +} + +func (a *Analyzer) metaReferencesCountOrEach(resourceAddr addrs.AbsResource) []Reference { + return a.ReferencesFromResourceRepetition(resourceAddr) +} + +func (a *Analyzer) metaReferencesResourceInstance(moduleAddr addrs.ModuleInstance, addr addrs.ResourceInstance, remain hcl.Traversal) []Reference { + modCfg := a.ModuleConfig(moduleAddr) + if modCfg == nil { + return nil + } + + rc := modCfg.ResourceByAddr(addr.Resource) + if rc == nil { + return nil + } + + // In valid cases we should have the schema for this resource type + // available. In invalid cases we might be dealing with partial information, + // and so the schema might be nil so we won't be able to return reference + // information for this particular situation. + providerSchema := a.providerSchemas[rc.Provider] + if providerSchema == nil { + return nil + } + + resourceTypeSchema, _ := providerSchema.SchemaForResourceAddr(addr.Resource) + if resourceTypeSchema == nil { + return nil + } + + // When analyzing the resource configuration to look for references, we'll + // make a best effort to narrow down to only a particular sub-portion of + // the configuration by following the remaining traversal steps. In the + // ideal case this will lead us to a specific expression, but as a + // compromise it might lead us to some nested blocks where at least we + // can limit our searching only to those. + bodies := []hcl.Body{rc.Config} + var exprs []hcl.Expression + schema := resourceTypeSchema + var steppingThrough *configschema.NestedBlock + var steppingThroughType string + nextStep := func(newBodies []hcl.Body, newExprs []hcl.Expression) { + // We append exprs but replace bodies because exprs represent extra + // expressions we collected on the path, such as dynamic block for_each, + // which can potentially contribute to the final evalcontext, but + // bodies never contribute any values themselves, and instead just + // narrow down where we're searching. 
+ bodies = newBodies + exprs = append(exprs, newExprs...) + steppingThrough = nil + steppingThroughType = "" + // Caller must also update "schema" if necessary. + } + traverseInBlock := func(name string) ([]hcl.Body, []hcl.Expression) { + if attr := schema.Attributes[name]; attr != nil { + // When we reach a specific attribute we can't traverse any deeper, because attributes are the leaves of the schema. + schema = nil + return traverseAttr(bodies, name) + } else if blockType := schema.BlockTypes[name]; blockType != nil { + // We need to take a different action here depending on + // the nesting mode of the block type. Some require us + // to traverse in two steps in order to select a specific + // child block, while others we can just step through + // directly. + switch blockType.Nesting { + case configschema.NestingSingle, configschema.NestingGroup: + // There should be only zero or one blocks of this + // type, so we can traverse in only one step. + schema = &blockType.Block + return traverseNestedBlockSingle(bodies, name) + case configschema.NestingMap, configschema.NestingList, configschema.NestingSet: + steppingThrough = blockType + return bodies, exprs // Preserve current selections for the second step + default: + // The above should be exhaustive, but just in case + // we add something new in future we'll bail out + // here and conservatively return everything under + // the current traversal point. + schema = nil + return nil, nil + } + } + + // We'll get here if the given name isn't in the schema at all. If so, + // there's nothing else to be done here. + schema = nil + return nil, nil + } +Steps: + for _, step := range remain { + // If we filter out all of our bodies before we finish traversing then + // we know we won't find anything else, because all of our subsequent + // traversal steps won't have any bodies to search. 
+ if len(bodies) == 0 { + return nil + } + // If we no longer have a schema then that suggests we've + // traversed as deep as what the schema covers (e.g. we reached + // a specific attribute) and so we'll stop early, assuming that + // any remaining steps are traversals into an attribute expression + // result. + if schema == nil { + break + } + + switch step := step.(type) { + + case hcl.TraverseAttr: + switch { + case steppingThrough != nil: + // If we're stepping through a NestingMap block then + // it's valid to use attribute syntax to select one of + // the blocks by its label. Other nesting types require + // TraverseIndex, so can never be valid. + if steppingThrough.Nesting != configschema.NestingMap { + nextStep(nil, nil) // bail out + continue + } + nextStep(traverseNestedBlockMap(bodies, steppingThroughType, step.Name)) + schema = &steppingThrough.Block + default: + nextStep(traverseInBlock(step.Name)) + if schema == nil { + // traverseInBlock determined that we've traversed as + // deep as we can with reference to schema, so we'll + // stop here and just process whatever's selected. 
+ break Steps + } + } + case hcl.TraverseIndex: + switch { + case steppingThrough != nil: + switch steppingThrough.Nesting { + case configschema.NestingMap: + keyVal, err := convert.Convert(step.Key, cty.String) + if err != nil { // Invalid traversal, so can't have any refs + nextStep(nil, nil) // bail out + continue + } + nextStep(traverseNestedBlockMap(bodies, steppingThroughType, keyVal.AsString())) + schema = &steppingThrough.Block + case configschema.NestingList: + idxVal, err := convert.Convert(step.Key, cty.Number) + if err != nil { // Invalid traversal, so can't have any refs + nextStep(nil, nil) // bail out + continue + } + var idx int + err = gocty.FromCtyValue(idxVal, &idx) + if err != nil { // Invalid traversal, so can't have any refs + nextStep(nil, nil) // bail out + continue + } + nextStep(traverseNestedBlockList(bodies, steppingThroughType, idx)) + schema = &steppingThrough.Block + default: + // Note that NestingSet ends up in here because we don't + // actually allow traversing into set-backed block types, + // and so such a reference would be invalid. + nextStep(nil, nil) // bail out + continue + } + default: + // When indexing the contents of a block directly we always + // interpret the key as a string representing an attribute + // name. + nameVal, err := convert.Convert(step.Key, cty.String) + if err != nil { // Invalid traversal, so can't have any refs + nextStep(nil, nil) // bail out + continue + } + nextStep(traverseInBlock(nameVal.AsString())) + if schema == nil { + // traverseInBlock determined that we've traversed as + // deep as we can with reference to schema, so we'll + // stop here and just process whatever's selected. + break Steps + } + } + default: + // We shouldn't get here, because the above cases are exhaustive + // for all of the relative traversal types, but we'll be robust in + // case HCL adds more in future and just pretend the traversal + // ended a bit early if so. 
+ break Steps + } + } + + if steppingThrough != nil { + // If we ended in the middle of "stepping through" then we'll conservatively + // use the bodies of _all_ nested blocks of the type we were stepping + // through, because the recipient of this value could refer to any + // of them dynamically. + var labelNames []string + if steppingThrough.Nesting == configschema.NestingMap { + labelNames = []string{"key"} + } + blocks := findBlocksInBodies(bodies, steppingThroughType, labelNames) + for _, block := range blocks { + bodies, exprs = blockParts(block) + } + } + + if len(bodies) == 0 && len(exprs) == 0 { + return nil + } + + var refs []*addrs.Reference + for _, expr := range exprs { + moreRefs, _ := lang.ReferencesInExpr(expr) + refs = append(refs, moreRefs...) + } + if schema != nil { + for _, body := range bodies { + moreRefs, _ := lang.ReferencesInBlock(body, schema) + refs = append(refs, moreRefs...) + } + } + return absoluteRefs(addr.Absolute(moduleAddr), refs) +} + +func traverseAttr(bodies []hcl.Body, name string) ([]hcl.Body, []hcl.Expression) { + if len(bodies) == 0 { + return nil, nil + } + schema := &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + {Name: name}, + }, + } + // We can find at most one expression per body, because attribute names + // are always unique within a body. + retExprs := make([]hcl.Expression, 0, len(bodies)) + for _, body := range bodies { + content, _, _ := body.PartialContent(schema) + if attr := content.Attributes[name]; attr != nil && attr.Expr != nil { + retExprs = append(retExprs, attr.Expr) + } + } + return nil, retExprs +} + +func traverseNestedBlockSingle(bodies []hcl.Body, typeName string) ([]hcl.Body, []hcl.Expression) { + if len(bodies) == 0 { + return nil, nil + } + + blocks := findBlocksInBodies(bodies, typeName, nil) + var retBodies []hcl.Body + var retExprs []hcl.Expression + for _, block := range blocks { + moreBodies, moreExprs := blockParts(block) + retBodies = append(retBodies, moreBodies...) 
+ retExprs = append(retExprs, moreExprs...) + } + return retBodies, retExprs +} + +func traverseNestedBlockMap(bodies []hcl.Body, typeName string, key string) ([]hcl.Body, []hcl.Expression) { + if len(bodies) == 0 { + return nil, nil + } + + blocks := findBlocksInBodies(bodies, typeName, []string{"key"}) + var retBodies []hcl.Body + var retExprs []hcl.Expression + for _, block := range blocks { + switch block.Type { + case "dynamic": + // For dynamic blocks we allow the key to be chosen dynamically + // and so we'll just conservatively include all dynamic block + // bodies. However, we need to also look for references in some + // arguments of the dynamic block itself. + argExprs, contentBody := dynamicBlockParts(block.Body) + retExprs = append(retExprs, argExprs...) + if contentBody != nil { + retBodies = append(retBodies, contentBody) + } + case typeName: + if len(block.Labels) == 1 && block.Labels[0] == key && block.Body != nil { + retBodies = append(retBodies, block.Body) + } + } + } + return retBodies, retExprs +} + +func traverseNestedBlockList(bodies []hcl.Body, typeName string, idx int) ([]hcl.Body, []hcl.Expression) { + if len(bodies) == 0 { + return nil, nil + } + + schema := &hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + {Type: typeName, LabelNames: nil}, + {Type: "dynamic", LabelNames: []string{"type"}}, + }, + } + var retBodies []hcl.Body + var retExprs []hcl.Expression + for _, body := range bodies { + content, _, _ := body.PartialContent(schema) + blocks := content.Blocks + + // A tricky aspect of this scenario is that if there are any "dynamic" + // blocks then we can't statically predict how many concrete blocks they + // will generate, and so consequently we can't predict the indices of + // any statically-defined blocks that might appear after them. 
+ firstDynamic := -1 // -1 means "no dynamic blocks" + for i, block := range blocks { + if block.Type == "dynamic" { + firstDynamic = i + break + } + } + + switch { + case firstDynamic >= 0 && idx >= firstDynamic: + // This is the unfortunate case where the selection could be + // any of the blocks from firstDynamic onwards, and so we + // need to conservatively include all of them in our result. + for _, block := range blocks[firstDynamic:] { + moreBodies, moreExprs := blockParts(block) + retBodies = append(retBodies, moreBodies...) + retExprs = append(retExprs, moreExprs...) + } + default: + // This is the happier case where we can select just a single + // static block based on idx. Note that this one is guaranteed + // to never be dynamic but we're using blockParts here just + // for consistency. + moreBodies, moreExprs := blockParts(blocks[idx]) + retBodies = append(retBodies, moreBodies...) + retExprs = append(retExprs, moreExprs...) + } + } + + return retBodies, retExprs +} + +func findBlocksInBodies(bodies []hcl.Body, typeName string, labelNames []string) []*hcl.Block { + // We need to look for both static blocks of the given type, and any + // dynamic blocks whose label gives the expected type name. + schema := &hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + {Type: typeName, LabelNames: labelNames}, + {Type: "dynamic", LabelNames: []string{"type"}}, + }, + } + var blocks []*hcl.Block + for _, body := range bodies { + // We ignore errors here because we'll just make a best effort to analyze + // whatever partial result HCL returns in that case. + content, _, _ := body.PartialContent(schema) + + for _, block := range content.Blocks { + switch block.Type { + case "dynamic": + if len(block.Labels) != 1 { // Invalid + continue + } + if block.Labels[0] == typeName { + blocks = append(blocks, block) + } + case typeName: + blocks = append(blocks, block) + } + } + } + + // NOTE: The caller still needs to check for dynamic vs. 
static in order + // to do further processing. The callers above all aim to encapsulate + // that. + return blocks +} + +func blockParts(block *hcl.Block) ([]hcl.Body, []hcl.Expression) { + switch block.Type { + case "dynamic": + exprs, contentBody := dynamicBlockParts(block.Body) + var bodies []hcl.Body + if contentBody != nil { + bodies = []hcl.Body{contentBody} + } + return bodies, exprs + default: + if block.Body == nil { + return nil, nil + } + return []hcl.Body{block.Body}, nil + } +} + +func dynamicBlockParts(body hcl.Body) ([]hcl.Expression, hcl.Body) { + if body == nil { + return nil, nil + } + + // This is a subset of the "dynamic" block schema defined by the HCL + // dynblock extension, covering only the two arguments that are allowed + // to be arbitrary expressions possibly referring elsewhere. + schema := &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + {Name: "for_each"}, + {Name: "labels"}, + }, + Blocks: []hcl.BlockHeaderSchema{ + {Type: "content"}, + }, + } + content, _, _ := body.PartialContent(schema) + var exprs []hcl.Expression + if len(content.Attributes) != 0 { + exprs = make([]hcl.Expression, 0, len(content.Attributes)) + } + for _, attr := range content.Attributes { + if attr.Expr != nil { + exprs = append(exprs, attr.Expr) + } + } + var contentBody hcl.Body + for _, block := range content.Blocks { + if block != nil && block.Type == "content" && block.Body != nil { + contentBody = block.Body + } + } + return exprs, contentBody +} diff --git a/internal/terraform/lang/globalref/analyzer_meta_references_shortcuts.go b/internal/terraform/lang/globalref/analyzer_meta_references_shortcuts.go new file mode 100644 index 00000000..71e6b9bf --- /dev/null +++ b/internal/terraform/lang/globalref/analyzer_meta_references_shortcuts.go @@ -0,0 +1,87 @@ +package globalref + +import ( + "fmt" + + "github.com/camptocamp/terraboard/internal/terraform/addrs" + "github.com/camptocamp/terraboard/internal/terraform/lang" +) + +// ReferencesFromOutputValue 
returns all of the direct references from the +// value expression of the given output value. It doesn't include any indirect +// references. +func (a *Analyzer) ReferencesFromOutputValue(addr addrs.AbsOutputValue) []Reference { + mc := a.ModuleConfig(addr.Module) + if mc == nil { + return nil + } + oc := mc.Outputs[addr.OutputValue.Name] + if oc == nil { + return nil + } + refs, _ := lang.ReferencesInExpr(oc.Expr) + return absoluteRefs(addr.Module, refs) +} + +// ReferencesFromResourceInstance returns all of the direct references from the +// definition of the resource instance at the given address. It doesn't include +// any indirect references. +// +// The result doesn't directly include references from a "count" or "for_each" +// expression belonging to the associated resource, but it will include any +// references to count.index, each.key, or each.value that appear in the +// expressions which you can then, if you wish, resolve indirectly using +// Analyzer.MetaReferences. Alternatively, you can use +// Analyzer.ReferencesFromResourceRepetition to get that same result directly. +func (a *Analyzer) ReferencesFromResourceInstance(addr addrs.AbsResourceInstance) []Reference { + // Using MetaReferences for this is kinda overkill, since + // lang.ReferencesInBlock would be sufficient really, but + // this ensures we keep consistent in how we build the + // resulting absolute references and otherwise aside from + // some extra overhead this call boils down to a call to + // lang.ReferencesInBlock anyway. + fakeRef := Reference{ + ContainerAddr: addr.Module, + LocalRef: &addrs.Reference{ + Subject: addr.Resource, + }, + } + return a.MetaReferences(fakeRef) +} + +// ReferencesFromResourceRepetition returns the references from the given +// resource's for_each or count expression, or an empty set if the resource +// doesn't use repetition. 
+// +// This is a special-case sort of helper for use in situations where an +// expression might refer to count.index, each.key, or each.value, and thus +// we say that it depends indirectly on the repetition expression. +func (a *Analyzer) ReferencesFromResourceRepetition(addr addrs.AbsResource) []Reference { + modCfg := a.ModuleConfig(addr.Module) + if modCfg == nil { + return nil + } + rc := modCfg.ResourceByAddr(addr.Resource) + if rc == nil { + return nil + } + + // We're assuming here that resources can either have count or for_each, + // but never both, because that's a requirement enforced by the language + // decoder. But we'll assert it just to make sure we catch it if that + // changes for some reason. + if rc.ForEach != nil && rc.Count != nil { + panic(fmt.Sprintf("%s has both for_each and count", addr)) + } + + switch { + case rc.ForEach != nil: + refs, _ := lang.ReferencesInExpr(rc.ForEach) + return absoluteRefs(addr.Module, refs) + case rc.Count != nil: + refs, _ := lang.ReferencesInExpr(rc.Count) + return absoluteRefs(addr.Module, refs) + default: + return nil + } +} diff --git a/internal/terraform/lang/globalref/doc.go b/internal/terraform/lang/globalref/doc.go new file mode 100644 index 00000000..133a9e7f --- /dev/null +++ b/internal/terraform/lang/globalref/doc.go @@ -0,0 +1,9 @@ +// Package globalref is home to some analysis algorithms that aim to answer +// questions about references between objects and object attributes across +// an entire configuration. +// +// This is a different problem than references within a single module, which +// we handle using some relatively simpler functions in the "lang" package +// in the parent directory. The globalref algorithms are often implemented +// in terms of those module-local reference-checking functions. 
+package globalref diff --git a/internal/terraform/lang/globalref/reference.go b/internal/terraform/lang/globalref/reference.go new file mode 100644 index 00000000..ae0392ad --- /dev/null +++ b/internal/terraform/lang/globalref/reference.go @@ -0,0 +1,200 @@ +package globalref + +import ( + "fmt" + + "github.com/camptocamp/terraboard/internal/terraform/addrs" + "github.com/camptocamp/terraboard/internal/terraform/tfdiags" + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" +) + +// Reference combines an addrs.Reference with the address of the module +// instance or resource instance where it was found. +// +// Because of the design of the Terraform language, our main model of +// references only captures the module-local part of the reference and assumes +// that it's always clear from context which module a reference belongs to. +// That's not true for globalref because our whole purpose is to work across +// module boundaries, and so this package in particular has its own +// representation of references. +type Reference struct { + // ContainerAddr is always either addrs.ModuleInstance or + // addrs.AbsResourceInstance. The latter is required if LocalRef's + // subject is either an addrs.CountAddr or addrs.ForEachAddr, so + // we can know which resource's repetition expression it's + // referring to. + ContainerAddr addrs.Targetable + + // LocalRef is a reference that would be resolved in the context + // of the module instance or resource instance given in ContainerAddr. + LocalRef *addrs.Reference +} + +func absoluteRef(containerAddr addrs.Targetable, localRef *addrs.Reference) Reference { + ret := Reference{ + ContainerAddr: containerAddr, + LocalRef: localRef, + } + // For simplicity's sake, we always reduce the ContainerAddr to be + // just the module address unless it's a count.index, each.key, or + // each.value reference, because for anything else it's immaterial + // which resource it belongs to. 
+ switch localRef.Subject.(type) { + case addrs.CountAttr, addrs.ForEachAttr: + // nothing to do + default: + ret.ContainerAddr = ret.ModuleAddr() + } + return ret +} + +func absoluteRefs(containerAddr addrs.Targetable, refs []*addrs.Reference) []Reference { + if len(refs) == 0 { + return nil + } + + ret := make([]Reference, len(refs)) + for i, ref := range refs { + ret[i] = absoluteRef(containerAddr, ref) + } + return ret +} + +// ModuleAddr returns the address of the module where the reference would +// be resolved. +// +// This is either ContainerAddr directly if it's already just a module +// instance, or the module instance part of it if it's a resource instance. +func (r Reference) ModuleAddr() addrs.ModuleInstance { + switch addr := r.ContainerAddr.(type) { + case addrs.ModuleInstance: + return addr + case addrs.AbsResourceInstance: + return addr.Module + default: + // NOTE: We're intentionally using only a subset of possible + // addrs.Targetable implementations here, so anything else + // is invalid. + panic(fmt.Sprintf("reference has invalid container address type %T", addr)) + } +} + +// ResourceInstance returns the address of the resource where the reference +// would be resolved, if there is one. +// +// Because not all references belong to resources, the extra boolean return +// value indicates whether the returned address is valid. 
+func (r Reference) ResourceInstance() (addrs.AbsResourceInstance, bool) { + switch container := r.ContainerAddr.(type) { + case addrs.ModuleInstance: + moduleInstance := container + + switch ref := r.LocalRef.Subject.(type) { + case addrs.Resource: + return ref.Instance(addrs.NoKey).Absolute(moduleInstance), true + case addrs.ResourceInstance: + return ref.Absolute(moduleInstance), true + } + + return addrs.AbsResourceInstance{}, false + + case addrs.AbsResourceInstance: + return container, true + default: + // NOTE: We're intentionally using only a subset of possible + // addrs.Targetable implementations here, so anything else + // is invalid. + panic(fmt.Sprintf("reference has invalid container address type %T", container)) + } +} + +// DebugString returns an internal (but still somewhat Terraform-language-like) +// compact string representation of the receiver, which isn't an address that +// any of our usual address parsers could accept but still captures the +// essence of what the reference represents. +// +// The DebugString result is not suitable for end-user-oriented messages. +// +// DebugString is also not suitable for use as a unique key for a reference, +// because it's ambiguous (between a no-key resource instance and a resource) +// and because it discards the source location information in the LocalRef. +func (r Reference) DebugString() string { + // As the doc comment insinuates, we don't have any real syntax for + // "absolute references": references are always local, and targets are + // always absolute but only include modules and resources. + return r.ContainerAddr.String() + "::" + r.LocalRef.DisplayString() +} + +// ResourceAttr converts the Reference value to a more specific ResourceAttr +// value. +// +// Because not all references belong to resources, the extra boolean return +// value indicates whether the returned address is valid. 
+func (r Reference) ResourceAttr() (ResourceAttr, bool) { + res, ok := r.ResourceInstance() + if !ok { + return ResourceAttr{}, ok + } + + traversal := r.LocalRef.Remaining + + path := make(cty.Path, len(traversal)) + for si, step := range traversal { + switch ts := step.(type) { + case hcl.TraverseRoot: + path[si] = cty.GetAttrStep{ + Name: ts.Name, + } + case hcl.TraverseAttr: + path[si] = cty.GetAttrStep{ + Name: ts.Name, + } + case hcl.TraverseIndex: + path[si] = cty.IndexStep{ + Key: ts.Key, + } + default: + panic(fmt.Sprintf("unsupported traversal step %#v", step)) + } + } + + return ResourceAttr{ + Resource: res, + Attr: path, + }, true +} + +// addrKey returns the referenceAddrKey value for the item that +// this reference refers to, discarding any source location information. +// +// See the referenceAddrKey doc comment for more information on what this +// is suitable for. +func (r Reference) addrKey() referenceAddrKey { + // This is a pretty arbitrary bunch of stuff. We include the type here + // just to differentiate between no-key resource instances and resources. + return referenceAddrKey(fmt.Sprintf("%s(%T)%s", r.ContainerAddr.String(), r.LocalRef.Subject, r.LocalRef.DisplayString())) +} + +// referenceAddrKey is a special string type which conventionally contains +// a unique string representation of the object that a reference refers to, +// although not of the reference itself because it ignores the information +// that would differentiate two different references to the same object. +// +// The actual content of a referenceAddrKey is arbitrary, for internal use +// only, and subject to change in future. We use a named type here only to +// make it easier to see when we're intentionally using strings to uniquely +// identify absolute reference addresses. +type referenceAddrKey string + +// ResourceAttr represents a global resource and attribute reference. 
+// This is a more specific form of the Reference type since it can only refer +// to a specific AbsResource and one of its attributes. +type ResourceAttr struct { + Resource addrs.AbsResourceInstance + Attr cty.Path +} + +func (r ResourceAttr) DebugString() string { + return r.Resource.String() + tfdiags.FormatCtyPath(r.Attr) +} diff --git a/internal/terraform/lang/globalref/testdata/assorted/assorted-root.tf b/internal/terraform/lang/globalref/testdata/assorted/assorted-root.tf new file mode 100644 index 00000000..09f730ee --- /dev/null +++ b/internal/terraform/lang/globalref/testdata/assorted/assorted-root.tf @@ -0,0 +1,47 @@ +locals { + a = "hello world" + b = 2 + single = test_thing.single.id +} + +resource "test_thing" "single" { + string = local.a + number = local.b + +} + +resource "test_thing" "for_each" { + for_each = {"q": local.a} + + string = local.a + + single { + z = test_thing.single.string + } +} + +resource "test_thing" "count" { + for_each = length(local.a) + + string = local.a +} + +module "single" { + source = "./child" + + a = test_thing.single +} + +module "for_each" { + source = "./child" + for_each = {"q": test_thing.single} + + a = test_thing.single +} + +module "count" { + source = "./child" + count = length(test_thing.single.string) + + a = test_thing.single +} diff --git a/internal/terraform/lang/globalref/testdata/assorted/child/assorted-child.tf b/internal/terraform/lang/globalref/testdata/assorted/child/assorted-child.tf new file mode 100644 index 00000000..e722fe8e --- /dev/null +++ b/internal/terraform/lang/globalref/testdata/assorted/child/assorted-child.tf @@ -0,0 +1,13 @@ +variable "a" { +} + +resource "test_thing" "foo" { + string = var.a +} + +output "a" { + value = { + a = var.a + foo = test_thing.foo + } +} diff --git a/internal/terraform/lang/globalref/testdata/contributing-resources/compute/contributing-resources-compute.tf 
b/internal/terraform/lang/globalref/testdata/contributing-resources/compute/contributing-resources-compute.tf new file mode 100644 index 00000000..a88ec466 --- /dev/null +++ b/internal/terraform/lang/globalref/testdata/contributing-resources/compute/contributing-resources-compute.tf @@ -0,0 +1,53 @@ +variable "network" { + type = object({ + vpc_id = string + subnet_ids = map(string) + }) +} + +resource "test_thing" "controller" { + for_each = var.network.subnet_ids + + string = each.value +} + +locals { + workers = flatten([ + for k, id in var.network.subnet_ids : [ + for n in range(3) : { + unique_key = "${k}:${n}" + subnet_id = n + } + ] + ]) + + controllers = test_thing.controller +} + +resource "test_thing" "worker" { + for_each = { for o in local.workers : o.unique_key => o.subnet_id } + + string = each.value + + dynamic "list" { + for_each = test_thing.controller + content { + z = list.value.string + } + } +} + +resource "test_thing" "load_balancer" { + string = var.network.vpc_id + + dynamic "list" { + for_each = local.controllers + content { + z = list.value.string + } + } +} + +output "compuneetees_api_url" { + value = test_thing.load_balancer.string +} diff --git a/internal/terraform/lang/globalref/testdata/contributing-resources/contributing-resources-root.tf b/internal/terraform/lang/globalref/testdata/contributing-resources/contributing-resources-root.tf new file mode 100644 index 00000000..d6ec5c48 --- /dev/null +++ b/internal/terraform/lang/globalref/testdata/contributing-resources/contributing-resources-root.tf @@ -0,0 +1,28 @@ +variable "environment" { + type = string +} + +data "test_thing" "environment" { + string = var.environment +} + +module "network" { + source = "./network" + + base_cidr_block = data.test_thing.environment.any.base_cidr_block + subnet_count = data.test_thing.environment.any.subnet_count +} + +module "compute" { + source = "./compute" + + network = module.network +} + +output "network" { + value = module.network +} + +output
"c10s_url" { + value = module.compute.compuneetees_api_url +} diff --git a/internal/terraform/lang/globalref/testdata/contributing-resources/network/contributing-resources-network.tf b/internal/terraform/lang/globalref/testdata/contributing-resources/network/contributing-resources-network.tf new file mode 100644 index 00000000..3a4c9dc1 --- /dev/null +++ b/internal/terraform/lang/globalref/testdata/contributing-resources/network/contributing-resources-network.tf @@ -0,0 +1,41 @@ +variable "base_cidr_block" { + type = string +} + +variable "subnet_count" { + type = number +} + +locals { + subnet_newbits = log(var.subnet_count, 2) + subnet_cidr_blocks = toset([ + for n in range(var.subnet_count) : cidrsubnet(var.base_cidr_block, local.subnet_newbits, n) + ]) +} + +resource "test_thing" "vpc" { + string = var.base_cidr_block +} + +resource "test_thing" "subnet" { + for_each = local.subnet_cidr_blocks + + string = test_thing.vpc.string + single { + z = each.value + } +} + +resource "test_thing" "route_table" { + for_each = local.subnet_cidr_blocks + + string = each.value +} + +output "vpc_id" { + value = test_thing.vpc.string +} + +output "subnet_ids" { + value = { for k, sn in test_thing.subnet : k => sn.string } +} diff --git a/internal/terraform/lang/marks/marks.go b/internal/terraform/lang/marks/marks.go new file mode 100644 index 00000000..78bb5273 --- /dev/null +++ b/internal/terraform/lang/marks/marks.go @@ -0,0 +1,42 @@ +package marks + +import ( + "github.com/zclconf/go-cty/cty" +) + +// valueMarks allow creating strictly typed values for use as cty.Value marks. +// Each distinct mark value must be a constant in this package whose value +// is a valueMark whose underlying string matches the name of the variable. +type valueMark string + +func (m valueMark) GoString() string { + return "marks." + string(m) +} + +// Has returns true if and only if the cty.Value has the given mark. 
+func Has(val cty.Value, mark valueMark) bool { + return val.HasMark(mark) +} + +// Contains returns true if the cty.Value or any value within it contains +// the given mark. +func Contains(val cty.Value, mark valueMark) bool { + ret := false + cty.Walk(val, func(_ cty.Path, v cty.Value) (bool, error) { + if v.HasMark(mark) { + ret = true + return false, nil + } + return true, nil + }) + return ret +} + +// Sensitive indicates that this value is marked as sensitive in the context of +// Terraform. +const Sensitive = valueMark("Sensitive") + +// TypeType is used to indicate that the value contains a representation of +// another value's type. This is part of the implementation of the console-only +// `type` function. +const TypeType = valueMark("TypeType") diff --git a/internal/terraform/lang/references.go b/internal/terraform/lang/references.go new file mode 100644 index 00000000..90c6406f --- /dev/null +++ b/internal/terraform/lang/references.go @@ -0,0 +1,81 @@ +package lang + +import ( + "github.com/camptocamp/terraboard/internal/terraform/addrs" + "github.com/camptocamp/terraboard/internal/terraform/configs/configschema" + "github.com/camptocamp/terraboard/internal/terraform/lang/blocktoattr" + "github.com/camptocamp/terraboard/internal/terraform/tfdiags" + "github.com/hashicorp/hcl/v2" +) + +// References finds all of the references in the given set of traversals, +// returning diagnostics if any of the traversals cannot be interpreted as a +// reference. +// +// This function does not do any de-duplication of references, since references +// have source location information embedded in them and so any invalid +// references that are duplicated should have errors reported for each +// occurrence. +// +// If the returned diagnostics contains errors then the result may be +// incomplete or invalid.
Otherwise, the returned slice has one reference per +// given traversal, though it is not guaranteed that the references will +// appear in the same order as the given traversals. +func References(traversals []hcl.Traversal) ([]*addrs.Reference, tfdiags.Diagnostics) { + if len(traversals) == 0 { + return nil, nil + } + + var diags tfdiags.Diagnostics + refs := make([]*addrs.Reference, 0, len(traversals)) + + for _, traversal := range traversals { + ref, refDiags := addrs.ParseRef(traversal) + diags = diags.Append(refDiags) + if ref == nil { + continue + } + refs = append(refs, ref) + } + + return refs, diags +} + +// ReferencesInBlock is a helper wrapper around References that first searches +// the given body for traversals, before converting those traversals to +// references. +// +// A block schema must be provided so that this function can determine where in +// the body variables are expected. +func ReferencesInBlock(body hcl.Body, schema *configschema.Block) ([]*addrs.Reference, tfdiags.Diagnostics) { + if body == nil { + return nil, nil + } + + // We use blocktoattr.ExpandedVariables instead of hcldec.Variables or + // dynblock.VariablesHCLDec here because when we evaluate a block we'll + // first apply the dynamic block extension and _then_ the blocktoattr + // transform, and so blocktoattr.ExpandedVariables takes into account + // both of those transforms when it analyzes the body to ensure we find + // all of the references as if they'd already moved into their final + // locations, even though we can't expand dynamic blocks yet until we + // already know which variables are required. + // + // The set of cases we want to detect here is covered by the tests for + // the plan graph builder in the main 'terraform' package, since it's + // in a better position to test this due to having mock providers etc + // available. 
+ traversals := blocktoattr.ExpandedVariables(body, schema) + return References(traversals) +} + +// ReferencesInExpr is a helper wrapper around References that first searches +// the given expression for traversals, before converting those traversals +// to references. +func ReferencesInExpr(expr hcl.Expression) ([]*addrs.Reference, tfdiags.Diagnostics) { + if expr == nil { + return nil, nil + } + traversals := expr.Variables() + return References(traversals) +} diff --git a/internal/terraform/lang/scope.go b/internal/terraform/lang/scope.go new file mode 100644 index 00000000..5786a142 --- /dev/null +++ b/internal/terraform/lang/scope.go @@ -0,0 +1,51 @@ +package lang + +import ( + "sync" + + "github.com/zclconf/go-cty/cty/function" + + "github.com/camptocamp/terraboard/internal/terraform/addrs" + "github.com/camptocamp/terraboard/internal/terraform/experiments" +) + +// Scope is the main type in this package, allowing dynamic evaluation of +// blocks and expressions based on some contextual information that informs +// which variables and functions will be available. +type Scope struct { + // Data is used to resolve references in expressions. + Data Data + + // SelfAddr is the address that the "self" object should be an alias of, + // or nil if the "self" object should not be available at all. + SelfAddr addrs.Referenceable + + // BaseDir is the base directory used by any interpolation functions that + // accept filesystem paths as arguments. + BaseDir string + + // PureOnly can be set to true to request that any non-pure functions + // produce unknown value results rather than actually executing. This is + // important during a plan phase to avoid generating results that could + // then differ during apply. + PureOnly bool + + funcs map[string]function.Function + funcsLock sync.Mutex + + // activeExperiments is an optional set of experiments that should be + // considered as active in the module that this scope will be used for. 
+ // Callers can populate it by calling the SetActiveExperiments method. + activeExperiments experiments.Set + + // ConsoleMode can be set to true to request any console-only functions are + // included in this scope. + ConsoleMode bool +} + +// SetActiveExperiments allows a caller to declare that a set of experiments +// is active for the module that the receiving Scope belongs to, which might +// then cause the scope to activate some additional experimental behaviors. +func (s *Scope) SetActiveExperiments(active experiments.Set) { + s.activeExperiments = active +} diff --git a/internal/terraform/lang/testdata/functions-test/hello.tmpl b/internal/terraform/lang/testdata/functions-test/hello.tmpl new file mode 100644 index 00000000..f112ef89 --- /dev/null +++ b/internal/terraform/lang/testdata/functions-test/hello.tmpl @@ -0,0 +1 @@ +Hello, ${name}! \ No newline at end of file diff --git a/internal/terraform/lang/testdata/functions-test/hello.txt b/internal/terraform/lang/testdata/functions-test/hello.txt new file mode 100644 index 00000000..3462721f --- /dev/null +++ b/internal/terraform/lang/testdata/functions-test/hello.txt @@ -0,0 +1 @@ +hello! \ No newline at end of file diff --git a/internal/terraform/lang/testdata/functions-test/subdirectory/hello.tmpl b/internal/terraform/lang/testdata/functions-test/subdirectory/hello.tmpl new file mode 100644 index 00000000..f112ef89 --- /dev/null +++ b/internal/terraform/lang/testdata/functions-test/subdirectory/hello.tmpl @@ -0,0 +1 @@ +Hello, ${name}! \ No newline at end of file diff --git a/internal/terraform/lang/testdata/functions-test/subdirectory/hello.txt b/internal/terraform/lang/testdata/functions-test/subdirectory/hello.txt new file mode 100644 index 00000000..3462721f --- /dev/null +++ b/internal/terraform/lang/testdata/functions-test/subdirectory/hello.txt @@ -0,0 +1 @@ +hello! 
\ No newline at end of file diff --git a/internal/terraform/lang/types/type_type.go b/internal/terraform/lang/types/type_type.go new file mode 100644 index 00000000..14edf5ec --- /dev/null +++ b/internal/terraform/lang/types/type_type.go @@ -0,0 +1,12 @@ +package types + +import ( + "reflect" + + "github.com/zclconf/go-cty/cty" +) + +// TypeType is a capsule type used to represent a cty.Type as a cty.Value. This +// is used by the `type()` console function to smuggle cty.Type values to the +// REPL session, where it can be displayed to the user directly. +var TypeType = cty.Capsule("type", reflect.TypeOf(cty.Type{})) diff --git a/internal/terraform/lang/types/types.go b/internal/terraform/lang/types/types.go new file mode 100644 index 00000000..69355d90 --- /dev/null +++ b/internal/terraform/lang/types/types.go @@ -0,0 +1,2 @@ +// Package types contains non-standard cty types used only within Terraform. +package types diff --git a/internal/terraform/logging/logging.go b/internal/terraform/logging/logging.go index 11bdd474..972f7cfa 100644 --- a/internal/terraform/logging/logging.go +++ b/internal/terraform/logging/logging.go @@ -8,6 +8,11 @@ import ( "strings" "syscall" + // go.etcd.io/etcd imports capnslog, which calls log.SetOutput in its + // init() function, so importing it here means that our log.SetOutput + // wins. this is fixed in coreos v3.5, which is not released yet. See + // https://github.com/etcd-io/etcd/issues/12498 for more information. + _ "github.com/coreos/pkg/capnslog" "github.com/hashicorp/go-hclog" ) @@ -187,3 +192,35 @@ func isValidLogLevel(level string) bool { return false } + +// PluginOutputMonitor creates an io.Writer that will warn about any writes in +// the default logger. This is used to catch unexpected output from plugins, +// notifying them about the problem as well as surfacing the lost data for +// context. 
+func PluginOutputMonitor(source string) io.Writer { + return pluginOutputMonitor{ + source: source, + log: logger, + } +} + +// pluginOutputMonitor is an io.Writer that logs all writes as +// "unexpected data" with the source name. +type pluginOutputMonitor struct { + source string + log hclog.Logger +} + +func (w pluginOutputMonitor) Write(d []byte) (int, error) { + // Limit the write size to 1024 bytes We're not expecting any data to come + // through this channel, so accidental writes will usually be stray fmt + // debugging statements and the like, but we want to provide context to the + // provider to indicate what the unexpected data might be. + n := len(d) + if n > 1024 { + d = append(d[:1024], '.', '.', '.') + } + + w.log.Warn("unexpected data", w.source, strings.TrimSpace(string(d))) + return n, nil +} diff --git a/internal/terraform/logging/panic.go b/internal/terraform/logging/panic.go index 211a1231..25ecc6d4 100644 --- a/internal/terraform/logging/panic.go +++ b/internal/terraform/logging/panic.go @@ -2,75 +2,62 @@ package logging import ( "fmt" - "io" - "io/ioutil" "os" + "runtime/debug" "strings" "sync" "github.com/hashicorp/go-hclog" - "github.com/mitchellh/panicwrap" ) // This output is shown if a panic happens. const panicOutput = ` - !!!!!!!!!!!!!!!!!!!!!!!!!!! TERRAFORM CRASH !!!!!!!!!!!!!!!!!!!!!!!!!!!! Terraform crashed! This is always indicative of a bug within Terraform. -A crash log has been placed at %[1]q relative to your current -working directory. It would be immensely helpful if you could please -report the crash with Terraform[1] so that we can fix this. - -When reporting bugs, please include your terraform version. That -information is available on the first line of crash.log. You can also -get it by running 'terraform --version' on the command line. +Please report the crash with Terraform[1] so that we can fix this. 
-SECURITY WARNING: the %[1]q file that was created may contain -sensitive information that must be redacted before it is safe to share -on the issue tracker. +When reporting bugs, please include your terraform version, the stack trace +shown below, and any additional information which may help replicate the issue. [1]: https://github.com/hashicorp/terraform/issues !!!!!!!!!!!!!!!!!!!!!!!!!!! TERRAFORM CRASH !!!!!!!!!!!!!!!!!!!!!!!!!!!! -` -// panicHandler is what is called by panicwrap when a panic is encountered -// within Terraform. It is guaranteed to run after the resulting process has -// exited so we can take the log file, add in the panic, and store it -// somewhere locally. -func PanicHandler(tmpLogPath string) panicwrap.HandlerFunc { - return func(m string) { - // Create the crash log file where we'll write the logs - f, err := ioutil.TempFile(".", "crash.*.log") - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to create crash log file: %s", err) - return - } - defer f.Close() +` - tmpLog, err := os.Open(tmpLogPath) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to open log file %q: %v\n", tmpLogPath, err) - return - } - defer tmpLog.Close() +// In case multiple goroutines panic concurrently, ensure only the first one +// recovered by PanicHandler starts printing. +var panicMutex sync.Mutex + +// PanicHandler is called to recover from an internal panic in Terraform, and +// augments the standard stack trace with a more user friendly error message. +// PanicHandler must be called as a deferred function, and must be the first +// defer called at the start of a new goroutine. +func PanicHandler() { + // Have all managed goroutines check in here, and prevent them from exiting + // if there's a panic in progress. While this can't lock the entire runtime + // to block progress, we can prevent some cases where Terraform may return + // early before the panic has been printed out.
+ panicMutex.Lock() + defer panicMutex.Unlock() + + recovered := recover() + if recovered == nil { + return + } - // Copy the contents to the crash file. This will include - // the panic that just happened. - if _, err = io.Copy(f, tmpLog); err != nil { - fmt.Fprintf(os.Stderr, "Failed to write crash log: %s", err) - return - } + fmt.Fprint(os.Stderr, panicOutput) + fmt.Fprint(os.Stderr, recovered, "\n") - // add the trace back to the log - f.WriteString("\n" + m) + // When called from a deferred function, debug.PrintStack will include the + // full stack from the point of the pending panic. + debug.PrintStack() - // Tell the user a crash occurred in some helpful way that - // they'll hopefully notice. - fmt.Printf("\n\n") - fmt.Printf(panicOutput, f.Name()) - } + // An exit code of 11 keeps us out of the way of the detailed exitcodes + // from plan, and also happens to be the same code as SIGSEGV which is + // roughly the same type of condition that causes most panics. + os.Exit(11) } const pluginPanicOutput = ` @@ -181,13 +168,5 @@ func (l *logPanicWrapper) Debug(msg string, args ...interface{}) { l.panicRecorder(msg) } - // If we have logging turned on, we need to prevent panicwrap from seeing - // this as a core panic. This can be done by obfuscating the panic error - // line. - if panicPrefix { - colon := strings.Index(msg, ":") - msg = strings.ToUpper(msg[:colon]) + msg[colon:] - } - l.Logger.Debug(msg, args...) 
} diff --git a/internal/terraform/logging/panic_test.go b/internal/terraform/logging/panic_test.go index d2eb7a90..e83a0ba5 100644 --- a/internal/terraform/logging/panic_test.go +++ b/internal/terraform/logging/panic_test.go @@ -1,12 +1,9 @@ package logging import ( - "bytes" "fmt" "strings" "testing" - - "github.com/hashicorp/go-hclog" ) func TestPanicRecorder(t *testing.T) { @@ -52,31 +49,3 @@ func TestPanicLimit(t *testing.T) { } } } - -func TestLogPanicWrapper(t *testing.T) { - var buf bytes.Buffer - logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{ - Name: "test", - Level: hclog.Debug, - Output: &buf, - DisableTime: true, - }) - - wrapped := (&logPanicWrapper{ - Logger: logger, - }).Named("test") - - wrapped.Debug("panic: invalid foo of bar") - wrapped.Debug("\tstack trace") - - expected := `[DEBUG] test.test: PANIC: invalid foo of bar -[DEBUG] test.test: stack trace -` - - got := buf.String() - - if expected != got { - t.Fatalf("Expected:\n%q\nGot:\n%q", expected, got) - } - -} diff --git a/internal/terraform/modsdir/manifest.go b/internal/terraform/modsdir/manifest.go index 2c15fa70..32610508 100644 --- a/internal/terraform/modsdir/manifest.go +++ b/internal/terraform/modsdir/manifest.go @@ -26,6 +26,9 @@ type Record struct { // This is used only to detect if the source was changed in configuration // since the module was last installed, which means that the installer // must re-install it. + // + // This should always be the result of calling method String on an + // addrs.ModuleSource value, to get a suitably-normalized result. 
SourceAddr string `json:"Source"` // Version is the exact version of the module, which results from parsing @@ -89,6 +92,20 @@ func ReadManifestSnapshot(r io.Reader) (Manifest, error) { } } + // Historically we didn't normalize the module source addresses when + // writing them into the manifest, and so we'll make a best effort + // to normalize them back in on read so that we can just gracefully + // upgrade on the next "terraform init". + if record.SourceAddr != "" { + if addr, err := addrs.ParseModuleSource(record.SourceAddr); err == nil { + // This is a best effort sort of thing. If the source + // address isn't valid then we'll just leave it as-is + // and let another component detect that downstream, + // to preserve the old behavior in that case. + record.SourceAddr = addr.String() + } + } + // Ensure Windows is using the proper modules path format after // reading the modules manifest Dir records record.Dir = filepath.FromSlash(record.Dir) diff --git a/internal/terraform/providers/addressed_types.go b/internal/terraform/providers/addressed_types.go new file mode 100644 index 00000000..877c3d97 --- /dev/null +++ b/internal/terraform/providers/addressed_types.go @@ -0,0 +1,33 @@ +package providers + +import ( + "sort" + + "github.com/camptocamp/terraboard/internal/terraform/addrs" +) + +// AddressedTypesAbs is a helper that extracts all of the distinct provider +// types from the given list of absolute provider configuration addresses. 
+func AddressedTypesAbs(providerAddrs []addrs.AbsProviderConfig) []addrs.Provider { + if len(providerAddrs) == 0 { + return nil + } + m := map[string]addrs.Provider{} + for _, addr := range providerAddrs { + m[addr.Provider.String()] = addr.Provider + } + + names := make([]string, 0, len(m)) + for typeName := range m { + names = append(names, typeName) + } + + sort.Strings(names) // Stable result for tests + + ret := make([]addrs.Provider, len(names)) + for i, name := range names { + ret[i] = m[name] + } + + return ret +} diff --git a/internal/terraform/providers/addressed_types_test.go b/internal/terraform/providers/addressed_types_test.go new file mode 100644 index 00000000..41b681bc --- /dev/null +++ b/internal/terraform/providers/addressed_types_test.go @@ -0,0 +1,45 @@ +package providers + +import ( + "testing" + + "github.com/go-test/deep" + + "github.com/camptocamp/terraboard/internal/terraform/addrs" +) + +func TestAddressedTypesAbs(t *testing.T) { + providerAddrs := []addrs.AbsProviderConfig{ + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("aws"), + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("aws"), + Alias: "foo", + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("azure"), + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("null"), + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("null"), + }, + } + + got := AddressedTypesAbs(providerAddrs) + want := []addrs.Provider{ + addrs.NewDefaultProvider("aws"), + addrs.NewDefaultProvider("azure"), + addrs.NewDefaultProvider("null"), + } + for _, problem := range deep.Equal(got, want) { + t.Error(problem) + } +} diff --git a/internal/terraform/providers/doc.go b/internal/terraform/providers/doc.go new file mode 100644 index 00000000..39aa1de6 --- /dev/null +++ 
b/internal/terraform/providers/doc.go @@ -0,0 +1,3 @@ +// Package providers contains the interface and primary types required to +// implement a Terraform resource provider. +package providers diff --git a/internal/terraform/providers/factory.go b/internal/terraform/providers/factory.go new file mode 100644 index 00000000..52700633 --- /dev/null +++ b/internal/terraform/providers/factory.go @@ -0,0 +1,63 @@ +package providers + +// Factory is a function type that creates a new instance of a resource +// provider, or returns an error if that is impossible. +type Factory func() (Interface, error) + +// FactoryFixed is a helper that creates a Factory that just returns some given +// single provider. +// +// Unlike usual factories, the exact same instance is returned for each call +// to the factory and so this must be used in only specialized situations where +// the caller can take care to either not mutate the given provider at all +// or to mutate it in ways that will not cause unexpected behavior for others +// holding the same reference. +func FactoryFixed(p Interface) Factory { + return func() (Interface, error) { + return p, nil + } +} + +// ProviderHasResource is a helper that requests schema from the given provider +// and checks if it has a resource type of the given name. +// +// This function is more expensive than it may first appear since it must +// retrieve the entire schema from the underlying provider, and so it should +// be used sparingly and especially not in tight loops. +// +// Since retrieving the provider may fail (e.g. if the provider is accessed +// over an RPC channel that has operational problems), this function will +// return false if the schema cannot be retrieved, under the assumption that +// a subsequent call to do anything with the resource type would fail +// anyway. 
+func ProviderHasResource(provider Interface, typeName string) bool { + resp := provider.GetProviderSchema() + if resp.Diagnostics.HasErrors() { + return false + } + + _, exists := resp.ResourceTypes[typeName] + return exists +} + +// ProviderHasDataSource is a helper that requests schema from the given +// provider and checks if it has a data source of the given name. +// +// This function is more expensive than it may first appear since it must +// retrieve the entire schema from the underlying provider, and so it should +// be used sparingly and especially not in tight loops. +// +// Since retrieving the provider may fail (e.g. if the provider is accessed +// over an RPC channel that has operational problems), this function will +// return false if the schema cannot be retrieved, under the assumption that +// a subsequent call to do anything with the data source would fail +// anyway. +func ProviderHasDataSource(provider Interface, dataSourceName string) bool { + resp := provider.GetProviderSchema() + if resp.Diagnostics.HasErrors() { + return false + } + + _, exists := resp.DataSources[dataSourceName] + return exists +} diff --git a/internal/terraform/providers/provider.go b/internal/terraform/providers/provider.go new file mode 100644 index 00000000..24bf6f77 --- /dev/null +++ b/internal/terraform/providers/provider.go @@ -0,0 +1,380 @@ +package providers + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/camptocamp/terraboard/internal/terraform/states" + "github.com/camptocamp/terraboard/internal/terraform/tfdiags" +) + +// Interface represents the set of methods required for a complete resource +// provider plugin. +type Interface interface { + // GetSchema returns the complete schema for the provider. + GetProviderSchema() GetProviderSchemaResponse + + // ValidateProviderConfig allows the provider to validate the configuration. + // The ValidateProviderConfigResponse.PreparedConfig field is unused. 
The + // final configuration is not stored in the state, and any modifications + // that need to be made must be made during the Configure method call. + ValidateProviderConfig(ValidateProviderConfigRequest) ValidateProviderConfigResponse + + // ValidateResourceConfig allows the provider to validate the resource + // configuration values. + ValidateResourceConfig(ValidateResourceConfigRequest) ValidateResourceConfigResponse + + // ValidateDataResourceConfig allows the provider to validate the data source + // configuration values. + ValidateDataResourceConfig(ValidateDataResourceConfigRequest) ValidateDataResourceConfigResponse + + // UpgradeResourceState is called when the state loader encounters an + // instance state whose schema version is less than the one reported by the + // currently-used version of the corresponding provider, and the upgraded + // result is used for any further processing. + UpgradeResourceState(UpgradeResourceStateRequest) UpgradeResourceStateResponse + + // Configure configures and initialized the provider. + ConfigureProvider(ConfigureProviderRequest) ConfigureProviderResponse + + // Stop is called when the provider should halt any in-flight actions. + // + // Stop should not block waiting for in-flight actions to complete. It + // should take any action it wants and return immediately acknowledging it + // has received the stop request. Terraform will not make any further API + // calls to the provider after Stop is called. + // + // The error returned, if non-nil, is assumed to mean that signaling the + // stop somehow failed and that the user should expect potentially waiting + // a longer period of time. + Stop() error + + // ReadResource refreshes a resource and returns its current state. + ReadResource(ReadResourceRequest) ReadResourceResponse + + // PlanResourceChange takes the current state and proposed state of a + // resource, and returns the planned final state. 
+ PlanResourceChange(PlanResourceChangeRequest) PlanResourceChangeResponse + + // ApplyResourceChange takes the planned state for a resource, which may + // yet contain unknown computed values, and applies the changes returning + // the final state. + ApplyResourceChange(ApplyResourceChangeRequest) ApplyResourceChangeResponse + + // ImportResourceState requests that the given resource be imported. + ImportResourceState(ImportResourceStateRequest) ImportResourceStateResponse + + // ReadDataSource returns the data source's current state. + ReadDataSource(ReadDataSourceRequest) ReadDataSourceResponse + + // Close shuts down the plugin process if applicable. + Close() error +} + +type GetProviderSchemaResponse struct { + // Provider is the schema for the provider itself. + Provider Schema + + // ProviderMeta is the schema for the provider's meta info in a module + ProviderMeta Schema + + // ResourceTypes map the resource type name to that type's schema. + ResourceTypes map[string]Schema + + // DataSources maps the data source name to that data source's schema. + DataSources map[string]Schema + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +type ValidateProviderConfigRequest struct { + // Config is the raw configuration value for the provider. + Config cty.Value +} + +type ValidateProviderConfigResponse struct { + // PreparedConfig is unused and will be removed with support for plugin protocol v5. + PreparedConfig cty.Value + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +type ValidateResourceConfigRequest struct { + // TypeName is the name of the resource type to validate. + TypeName string + + // Config is the configuration value to validate, which may contain unknown + // values. + Config cty.Value +} + +type ValidateResourceConfigResponse struct { + // Diagnostics contains any warnings or errors from the method call. 
+ Diagnostics tfdiags.Diagnostics +} + +type ValidateDataResourceConfigRequest struct { + // TypeName is the name of the data source type to validate. + TypeName string + + // Config is the configuration value to validate, which may contain unknown + // values. + Config cty.Value +} + +type ValidateDataResourceConfigResponse struct { + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +type UpgradeResourceStateRequest struct { + // TypeName is the name of the resource type being upgraded + TypeName string + + // Version is version of the schema that created the current state. + Version int64 + + // RawStateJSON and RawStateFlatmap contain the state that needs to be + // upgraded to match the current schema version. Because the schema is + // unknown, this contains only the raw data as stored in the state. + // RawStateJSON is the current json state encoding. + // RawStateFlatmap is the legacy flatmap encoding. + // Only one of these fields may be set for the upgrade request. + RawStateJSON []byte + RawStateFlatmap map[string]string +} + +type UpgradeResourceStateResponse struct { + // UpgradedState is the newly upgraded resource state. + UpgradedState cty.Value + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +type ConfigureProviderRequest struct { + // Terraform version is the version string from the running instance of + // terraform. Providers can use TerraformVersion to verify compatibility, + // and to store for informational purposes. + TerraformVersion string + + // Config is the complete configuration value for the provider. + Config cty.Value +} + +type ConfigureProviderResponse struct { + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +type ReadResourceRequest struct { + // TypeName is the name of the resource type being read.
+ TypeName string + + // PriorState contains the previously saved state value for this resource. + PriorState cty.Value + + // Private is an opaque blob that will be stored in state along with the + // resource. It is intended only for interpretation by the provider itself. + Private []byte + + // ProviderMeta is the configuration for the provider_meta block for the + // module and provider this resource belongs to. Its use is defined by + // each provider, and it should not be used without coordination with + // HashiCorp. It is considered experimental and subject to change. + ProviderMeta cty.Value +} + +type ReadResourceResponse struct { + // NewState contains the current state of the resource. + NewState cty.Value + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics + + // Private is an opaque blob that will be stored in state along with the + // resource. It is intended only for interpretation by the provider itself. + Private []byte +} + +type PlanResourceChangeRequest struct { + // TypeName is the name of the resource type to plan. + TypeName string + + // PriorState is the previously saved state value for this resource. + PriorState cty.Value + + // ProposedNewState is the expected state after the new configuration is + // applied. This is created by directly applying the configuration to the + // PriorState. The provider is then responsible for applying any further + // changes required to create the proposed final state. + ProposedNewState cty.Value + + // Config is the resource configuration, before being merged with the + // PriorState. Any value not explicitly set in the configuration will be + // null. Config is supplied for reference, but Provider implementations + // should prefer the ProposedNewState in most circumstances. + Config cty.Value + + // PriorPrivate is the previously saved private data returned from the + // provider during the last apply. 
+ PriorPrivate []byte + + // ProviderMeta is the configuration for the provider_meta block for the + // module and provider this resource belongs to. Its use is defined by + // each provider, and it should not be used without coordination with + // HashiCorp. It is considered experimental and subject to change. + ProviderMeta cty.Value +} + +type PlanResourceChangeResponse struct { + // PlannedState is the expected state of the resource once the current + // configuration is applied. + PlannedState cty.Value + + // RequiresReplace is the list of the attributes that are requiring + // resource replacement. + RequiresReplace []cty.Path + + // PlannedPrivate is an opaque blob that is not interpreted by terraform + // core. This will be saved and relayed back to the provider during + // ApplyResourceChange. + PlannedPrivate []byte + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics + + // LegacyTypeSystem is set only if the provider is using the legacy SDK + // whose type system cannot be precisely mapped into the Terraform type + // system. We use this to bypass certain consistency checks that would + // otherwise fail due to this imprecise mapping. No other provider or SDK + // implementation is permitted to set this. + LegacyTypeSystem bool +} + +type ApplyResourceChangeRequest struct { + // TypeName is the name of the resource type being applied. + TypeName string + + // PriorState is the current state of resource. + PriorState cty.Value + + // Planned state is the state returned from PlanResourceChange, and should + // represent the new state, minus any remaining computed attributes. + PlannedState cty.Value + + // Config is the resource configuration, before being merged with the + // PriorState. Any value not explicitly set in the configuration will be + // null. Config is supplied for reference, but Provider implementations + // should prefer the PlannedState in most circumstances. 
+ Config cty.Value + + // PlannedPrivate is the same value as returned by PlanResourceChange. + PlannedPrivate []byte + + // ProviderMeta is the configuration for the provider_meta block for the + // module and provider this resource belongs to. Its use is defined by + // each provider, and it should not be used without coordination with + // HashiCorp. It is considered experimental and subject to change. + ProviderMeta cty.Value +} + +type ApplyResourceChangeResponse struct { + // NewState is the new complete state after applying the planned change. + // In the event of an error, NewState should represent the most recent + // known state of the resource, if it exists. + NewState cty.Value + + // Private is an opaque blob that will be stored in state along with the + // resource. It is intended only for interpretation by the provider itself. + Private []byte + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics + + // LegacyTypeSystem is set only if the provider is using the legacy SDK + // whose type system cannot be precisely mapped into the Terraform type + // system. We use this to bypass certain consistency checks that would + // otherwise fail due to this imprecise mapping. No other provider or SDK + // implementation is permitted to set this. + LegacyTypeSystem bool +} + +type ImportResourceStateRequest struct { + // TypeName is the name of the resource type to be imported. + TypeName string + + // ID is a string with which the provider can identify the resource to be + // imported. + ID string +} + +type ImportResourceStateResponse struct { + // ImportedResources contains one or more state values related to the + // imported resource. It is not required that these be complete, only that + // there is enough identifying information for the provider to successfully + // update the states in ReadResource. 
+ ImportedResources []ImportedResource + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +// ImportedResource represents an object being imported into Terraform with the +// help of a provider. An ImportedObject is a RemoteObject that has been read +// by the provider's import handler but hasn't yet been committed to state. +type ImportedResource struct { + // TypeName is the name of the resource type associated with the + // returned state. It's possible for providers to import multiple related + // types with a single import request. + TypeName string + + // State is the state of the remote object being imported. This may not be + // complete, but must contain enough information to uniquely identify the + // resource. + State cty.Value + + // Private is an opaque blob that will be stored in state along with the + // resource. It is intended only for interpretation by the provider itself. + Private []byte +} + +// AsInstanceObject converts the receiving ImportedObject into a +// ResourceInstanceObject that has status ObjectReady. +// +// The returned object does not know its own resource type, so the caller must +// retain the ResourceType value from the source object if this information is +// needed. +// +// The returned object also has no dependency addresses, but the caller may +// freely modify the direct fields of the returned object without affecting +// the receiver. +func (ir ImportedResource) AsInstanceObject() *states.ResourceInstanceObject { + return &states.ResourceInstanceObject{ + Status: states.ObjectReady, + Value: ir.State, + Private: ir.Private, + } +} + +type ReadDataSourceRequest struct { + // TypeName is the name of the data source type to Read. + TypeName string + + // Config is the complete configuration for the requested data source. + Config cty.Value + + // ProviderMeta is the configuration for the provider_meta block for the + // module and provider this resource belongs to. 
Its use is defined by + // each provider, and it should not be used without coordination with + // HashiCorp. It is considered experimental and subject to change. + ProviderMeta cty.Value +} + +type ReadDataSourceResponse struct { + // State is the current state of the requested data source. + State cty.Value + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} diff --git a/internal/terraform/providers/schemas.go b/internal/terraform/providers/schemas.go new file mode 100644 index 00000000..c328f038 --- /dev/null +++ b/internal/terraform/providers/schemas.go @@ -0,0 +1,62 @@ +package providers + +import ( + "github.com/camptocamp/terraboard/internal/terraform/addrs" + "github.com/camptocamp/terraboard/internal/terraform/configs/configschema" +) + +// Schemas is an overall container for all of the schemas for all configurable +// objects defined within a particular provider. +// +// The schema for each individual configurable object is represented by nested +// instances of type Schema (singular) within this data structure. +// +// This type used to be known as terraform.ProviderSchema, but moved out here +// as part of our ongoing efforts to shrink down the "terraform" package. +// There's still a type alias at the old name, but we should prefer using +// providers.Schema in new code. However, a consequence of this transitional +// situation is that the "terraform" package still has the responsibility for +// constructing a providers.Schemas object based on responses from the provider +// API; hopefully we'll continue this refactor later so that functions in this +// package totally encapsulate the unmarshalling and include this as part of +// providers.GetProviderSchemaResponse. 
+type Schemas struct { + Provider *configschema.Block + ProviderMeta *configschema.Block + ResourceTypes map[string]*configschema.Block + DataSources map[string]*configschema.Block + + ResourceTypeSchemaVersions map[string]uint64 +} + +// SchemaForResourceType attempts to find a schema for the given mode and type. +// Returns nil if no such schema is available. +func (ss *Schemas) SchemaForResourceType(mode addrs.ResourceMode, typeName string) (schema *configschema.Block, version uint64) { + switch mode { + case addrs.ManagedResourceMode: + return ss.ResourceTypes[typeName], ss.ResourceTypeSchemaVersions[typeName] + case addrs.DataResourceMode: + // Data resources don't have schema versions right now, since state is discarded for each refresh + return ss.DataSources[typeName], 0 + default: + // Shouldn't happen, because the above cases are comprehensive. + return nil, 0 + } +} + +// SchemaForResourceAddr attempts to find a schema for the mode and type from +// the given resource address. Returns nil if no such schema is available. +func (ss *Schemas) SchemaForResourceAddr(addr addrs.Resource) (schema *configschema.Block, version uint64) { + return ss.SchemaForResourceType(addr.Mode, addr.Type) +} + +// Schema pairs a provider or resource schema with that schema's version. +// This is used to be able to upgrade the schema in UpgradeResourceState. +// +// This describes the schema for a single object within a provider. Type +// "Schemas" (plural) instead represents the overall collection of schemas +// for everything within a particular provider. 
+type Schema struct { + Version int64 + Block *configschema.Block +} diff --git a/internal/terraform/registry/client.go b/internal/terraform/registry/client.go index ec0f3128..879bd2d5 100644 --- a/internal/terraform/registry/client.go +++ b/internal/terraform/registry/client.go @@ -1,6 +1,7 @@ package registry import ( + "context" "encoding/json" "fmt" "io/ioutil" @@ -109,7 +110,7 @@ func (c *Client) Discover(host svchost.Hostname, serviceID string) (*url.URL, er } // ModuleVersions queries the registry for a module, and returns the available versions. -func (c *Client) ModuleVersions(module *regsrc.Module) (*response.ModuleVersions, error) { +func (c *Client) ModuleVersions(ctx context.Context, module *regsrc.Module) (*response.ModuleVersions, error) { host, err := module.SvcHost() if err != nil { return nil, err @@ -133,6 +134,7 @@ func (c *Client) ModuleVersions(module *regsrc.Module) (*response.ModuleVersions if err != nil { return nil, err } + req = req.WithContext(ctx) c.addRequestCreds(host, req.Request) req.Header.Set(xTerraformVersion, tfVersion) @@ -182,7 +184,7 @@ func (c *Client) addRequestCreds(host svchost.Hostname, req *http.Request) { // ModuleLocation find the download location for a specific version module. // This returns a string, because the final location may contain special go-getter syntax. -func (c *Client) ModuleLocation(module *regsrc.Module, version string) (string, error) { +func (c *Client) ModuleLocation(ctx context.Context, module *regsrc.Module, version string) (string, error) { host, err := module.SvcHost() if err != nil { return "", err @@ -211,6 +213,8 @@ func (c *Client) ModuleLocation(module *regsrc.Module, version string) (string, return "", err } + req = req.WithContext(ctx) + c.addRequestCreds(host, req.Request) req.Header.Set(xTerraformVersion, tfVersion) @@ -295,7 +299,7 @@ func maxRetryErrorHandler(resp *http.Response, err error, numTries int) (*http.R // both response and error. 
var errMsg string if resp != nil { - errMsg = fmt.Sprintf(": %d", resp.StatusCode) + errMsg = fmt.Sprintf(": %s returned from %s", resp.Status, resp.Request.URL) } else if err != nil { errMsg = fmt.Sprintf(": %s", err) } diff --git a/internal/terraform/registry/client_test.go b/internal/terraform/registry/client_test.go index bb5cfc60..5f36f49a 100644 --- a/internal/terraform/registry/client_test.go +++ b/internal/terraform/registry/client_test.go @@ -103,7 +103,7 @@ func TestLookupModuleVersions(t *testing.T) { t.Fatal(err) } - resp, err := client.ModuleVersions(modsrc) + resp, err := client.ModuleVersions(context.Background(), modsrc) if err != nil { t.Fatal(err) } @@ -143,7 +143,7 @@ func TestInvalidRegistry(t *testing.T) { t.Fatal(err) } - if _, err := client.ModuleVersions(modsrc); err == nil { + if _, err := client.ModuleVersions(context.Background(), modsrc); err == nil { t.Fatal("expected error") } } @@ -160,11 +160,11 @@ func TestRegistryAuth(t *testing.T) { t.Fatal(err) } - _, err = client.ModuleVersions(mod) + _, err = client.ModuleVersions(context.Background(), mod) if err != nil { t.Fatal(err) } - _, err = client.ModuleLocation(mod, "1.0.0") + _, err = client.ModuleLocation(context.Background(), mod, "1.0.0") if err != nil { t.Fatal(err) } @@ -173,11 +173,11 @@ func TestRegistryAuth(t *testing.T) { client.services.SetCredentialsSource(nil) // both should fail without auth - _, err = client.ModuleVersions(mod) + _, err = client.ModuleVersions(context.Background(), mod) if err == nil { t.Fatal("expected error") } - _, err = client.ModuleLocation(mod, "1.0.0") + _, err = client.ModuleLocation(context.Background(), mod, "1.0.0") if err == nil { t.Fatal("expected error") } @@ -195,7 +195,7 @@ func TestLookupModuleLocationRelative(t *testing.T) { t.Fatal(err) } - got, err := client.ModuleLocation(mod, "0.2.0") + got, err := client.ModuleLocation(context.Background(), mod, "0.2.0") if err != nil { t.Fatal(err) } @@ -224,7 +224,7 @@ func 
TestAccLookupModuleVersions(t *testing.T) { } s := NewClient(regDisco, nil) - resp, err := s.ModuleVersions(modsrc) + resp, err := s.ModuleVersions(context.Background(), modsrc) if err != nil { t.Fatal(err) } @@ -277,7 +277,7 @@ func TestLookupLookupModuleError(t *testing.T) { return oldCheck(ctx, resp, err) } - _, err = client.ModuleLocation(mod, "0.2.0") + _, err = client.ModuleLocation(context.Background(), mod, "0.2.0") if err == nil { t.Fatal("expected error") } @@ -299,7 +299,7 @@ func TestLookupModuleRetryError(t *testing.T) { if err != nil { t.Fatal(err) } - resp, err := client.ModuleVersions(modsrc) + resp, err := client.ModuleVersions(context.Background(), modsrc) if err == nil { t.Fatal("expected requests to exceed retry", err) } @@ -328,7 +328,7 @@ func TestLookupModuleNoRetryError(t *testing.T) { if err != nil { t.Fatal(err) } - resp, err := client.ModuleVersions(modsrc) + resp, err := client.ModuleVersions(context.Background(), modsrc) if err == nil { t.Fatal("expected request to fail", err) } @@ -354,7 +354,7 @@ func TestLookupModuleNetworkError(t *testing.T) { if err != nil { t.Fatal(err) } - resp, err := client.ModuleVersions(modsrc) + resp, err := client.ModuleVersions(context.Background(), modsrc) if err == nil { t.Fatal("expected request to fail", err) } diff --git a/internal/terraform/registry/regsrc/module.go b/internal/terraform/registry/regsrc/module.go index c3edd7d8..cc518a2e 100644 --- a/internal/terraform/registry/regsrc/module.go +++ b/internal/terraform/registry/regsrc/module.go @@ -6,7 +6,8 @@ import ( "regexp" "strings" - "github.com/hashicorp/terraform-svchost" + "github.com/camptocamp/terraboard/internal/terraform/addrs" + svchost "github.com/hashicorp/terraform-svchost" ) var ( @@ -89,6 +90,45 @@ func NewModule(host, namespace, name, provider, submodule string) (*Module, erro return m, nil } +// ModuleFromModuleSourceAddr is an adapter to automatically transform the +// modern representation of registry module addresses, +// 
addrs.ModuleSourceRegistry, into the legacy representation regsrc.Module. +// +// Note that the new-style model always does normalization during parsing and +// does not preserve the raw user input at all, and so although the fields +// of regsrc.Module are all called "Raw...", initializing a Module indirectly +// through an addrs.ModuleSourceRegistry will cause those values to be the +// normalized ones, not the raw user input. +// +// Use this only for temporary shims to call into existing code that still +// uses regsrc.Module. Eventually all other subsystems should be updated to +// use addrs.ModuleSourceRegistry instead, and then package regsrc can be +// removed altogether. +func ModuleFromModuleSourceAddr(addr addrs.ModuleSourceRegistry) *Module { + ret := ModuleFromRegistryPackageAddr(addr.PackageAddr) + ret.RawSubmodule = addr.Subdir + return ret +} + +// ModuleFromRegistryPackageAddr is similar to ModuleFromModuleSourceAddr, but +// it works with just the isolated registry package address, and not the +// full source address. +// +// The practical implication of that is that RawSubmodule will always be +// the empty string in results from this function, because "Submodule" maps +// to "Subdir" and that's a module source address concept, not a module +// package concept. In practice this typically doesn't matter because the +// registry client ignores the RawSubmodule field anyway; that's a concern +// for the higher-level module installer to deal with. +func ModuleFromRegistryPackageAddr(addr addrs.ModuleRegistryPackage) *Module { + return &Module{ + RawHost: NewFriendlyHost(addr.Host.String()), + RawNamespace: addr.Namespace, + RawName: addr.Name, + RawProvider: addr.TargetSystem, // this field was never actually enforced to be a provider address, so now has a more general name + } +} + // ParseModuleSource attempts to parse source as a Terraform registry module // source. 
If the string is not found to be in a valid format, // ErrInvalidModuleSource is returned. Note that this can only be used on diff --git a/internal/terraform/replacefile/doc.go b/internal/terraform/replacefile/doc.go new file mode 100644 index 00000000..8a1fbd99 --- /dev/null +++ b/internal/terraform/replacefile/doc.go @@ -0,0 +1,12 @@ +// Package replacefile is a small helper package focused directly at the +// problem of atomically "renaming" one file over another one. +// +// On Unix systems this is the standard behavior of the rename function, but +// the equivalent operation on Windows requires some specific operation flags +// which this package encapsulates. +// +// This package uses conditional compilation to select a different +// implementation for Windows vs. all other platforms. It may therefore +// require further fiddling in future if Terraform is ported to another +// OS that is neither Unix-like nor Windows. +package replacefile diff --git a/internal/terraform/replacefile/replacefile_unix.go b/internal/terraform/replacefile/replacefile_unix.go new file mode 100644 index 00000000..f0483c00 --- /dev/null +++ b/internal/terraform/replacefile/replacefile_unix.go @@ -0,0 +1,25 @@ +//go:build !windows +// +build !windows + +package replacefile + +import ( + "os" +) + +// AtomicRename renames from the source path to the destination path, +// atomically replacing any file that might already exist at the destination. +// +// Typically this operation can succeed only if the source and destination +// are within the same physical filesystem, so this function is best reserved +// for cases where the source and destination exist in the same directory and +// only the local filename differs between them. +// +// The Unix implementation of AtomicRename relies on the atomicity of renaming +// that is required by the ISO C standard, which in turn assumes that Go's +// implementation of rename is calling into a system call that preserves that +// guarantee. 
+func AtomicRename(source, destination string) error { + // On Unix systems, a rename is sufficiently atomic. + return os.Rename(source, destination) +} diff --git a/internal/terraform/replacefile/replacefile_windows.go b/internal/terraform/replacefile/replacefile_windows.go new file mode 100644 index 00000000..e45f35eb --- /dev/null +++ b/internal/terraform/replacefile/replacefile_windows.go @@ -0,0 +1,41 @@ +//go:build windows +// +build windows + +package replacefile + +import ( + "os" + "syscall" + + "golang.org/x/sys/windows" +) + +// AtomicRename renames from the source path to the destination path, +// atomically replacing any file that might already exist at the destination. +// +// Typically this operation can succeed only if the source and destination +// are within the same physical filesystem, so this function is best reserved +// for cases where the source and destination exist in the same directory and +// only the local filename differs between them. +func AtomicRename(source, destination string) error { + // On Windows, renaming one file over another is not atomic and certain + // error conditions can result in having only the source file and nothing + // at the destination file. Instead, we need to call into the MoveFileEx + // Windows API function, setting two flags to opt in to replacing an + // existing file. 
+ srcPtr, err := syscall.UTF16PtrFromString(source) + if err != nil { + return &os.LinkError{"replace", source, destination, err} + } + destPtr, err := syscall.UTF16PtrFromString(destination) + if err != nil { + return &os.LinkError{"replace", source, destination, err} + } + + flags := uint32(windows.MOVEFILE_REPLACE_EXISTING | windows.MOVEFILE_WRITE_THROUGH) + err = windows.MoveFileEx(srcPtr, destPtr, flags) + if err != nil { + return &os.LinkError{"replace", source, destination, err} + } + return nil +} diff --git a/internal/terraform/replacefile/writefile.go b/internal/terraform/replacefile/writefile.go new file mode 100644 index 00000000..d4dfb8ec --- /dev/null +++ b/internal/terraform/replacefile/writefile.go @@ -0,0 +1,77 @@ +package replacefile + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" +) + +// AtomicWriteFile uses a temporary file along with this package's AtomicRename +// function in order to provide a replacement for ioutil.WriteFile that +// writes the given file into place as atomically as the underlying operating +// system can support. +// +// The sense of "atomic" meant by this function is that the file at the +// given filename will either contain the entirety of the previous contents +// or the entirety of the given data array if opened and read at any point +// during the execution of the function. +// +// On some platforms attempting to overwrite a file that has at least one +// open filehandle will produce an error. On other platforms, the overwriting +// will succeed but existing open handles will still refer to the old file, +// even though its directory entry is no longer present. +// +// Although AtomicWriteFile tries its best to avoid leaving behind its +// temporary file on error, some particularly messy error cases may result +// in a leftover temporary file. 
+func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
+ dir, file := filepath.Split(filename)
+ if dir == "" {
+ // If the file is in the current working directory then dir will
+ // end up being "", but that's not right here because TempFile
+ // treats an empty dir as meaning "use the TMPDIR environment variable".
+ dir = "."
+ }
+ f, err := ioutil.TempFile(dir, file) // alongside target file and with a similar name
+ if err != nil {
+ return fmt.Errorf("cannot create temporary file to update %s: %s", filename, err)
+ }
+ tmpName := f.Name()
+ moved := false
+ defer func(f *os.File, name string) {
+ // Remove the temporary file if it hasn't been moved yet. We're
+ // ignoring errors here because there's nothing we can do about
+ // them anyway.
+ if !moved {
+ os.Remove(name)
+ }
+ }(f, tmpName)
+
+ // We'll try to apply the requested permissions. This may
+ // not be effective on all platforms, but should at least work on
+ // Unix-like targets and should be harmless elsewhere.
+ if err := os.Chmod(tmpName, perm); err != nil {
+ return fmt.Errorf("cannot set mode for temporary file %s: %s", tmpName, err)
+ }
+
+ // Write the data to the temporary file, then immediately close
+ // it, whether or not the write succeeds. Note that closing the file here
+ // is required because on Windows we can't move a file while it's open.
+ _, err = f.Write(data)
+ f.Close()
+ if err != nil {
+ return fmt.Errorf("cannot write to temporary file %s: %s", tmpName, err)
+ }
+
+ // Temporary file now replaces the original file, as atomically as
+ // possible. (At the very least, we should not end up with a file
+ // containing only a partial JSON object.)
+ err = AtomicRename(tmpName, filename) + if err != nil { + return fmt.Errorf("failed to replace %s with temporary file %s: %s", filename, tmpName, err) + } + + moved = true + return nil +} diff --git a/internal/terraform/states/instance_object.go b/internal/terraform/states/instance_object.go index 48ce1cbe..47c79ea4 100644 --- a/internal/terraform/states/instance_object.go +++ b/internal/terraform/states/instance_object.go @@ -1,6 +1,8 @@ package states import ( + "sort" + "github.com/zclconf/go-cty/cty" ctyjson "github.com/zclconf/go-cty/cty/json" @@ -108,13 +110,25 @@ func (o *ResourceInstanceObject) Encode(ty cty.Type, schemaVersion uint64) (*Res return nil, err } + // Dependencies are collected and merged in an unordered format (using map + // keys as a set), then later changed to a slice (in random ordering) to be + // stored in state as an array. To avoid pointless thrashing of state in + // refresh-only runs, we can either override comparison of dependency lists + // (more desirable, but tricky for Reasons) or just sort when encoding. + // Encoding of instances can happen concurrently, so we must copy the + // dependencies to avoid mutating what may be a shared array of values. 
+ dependencies := make([]addrs.ConfigResource, len(o.Dependencies)) + copy(dependencies, o.Dependencies) + + sort.Slice(dependencies, func(i, j int) bool { return dependencies[i].String() < dependencies[j].String() }) + return &ResourceInstanceObjectSrc{ SchemaVersion: schemaVersion, AttrsJSON: src, AttrSensitivePaths: pvm, Private: o.Private, Status: o.Status, - Dependencies: o.Dependencies, + Dependencies: dependencies, CreateBeforeDestroy: o.CreateBeforeDestroy, }, nil } diff --git a/internal/terraform/states/instance_object_test.go b/internal/terraform/states/instance_object_test.go new file mode 100644 index 00000000..f380c00e --- /dev/null +++ b/internal/terraform/states/instance_object_test.go @@ -0,0 +1,83 @@ +package states + +import ( + "sync" + "testing" + + "github.com/camptocamp/terraboard/internal/terraform/addrs" + "github.com/google/go-cmp/cmp" + "github.com/zclconf/go-cty/cty" +) + +func TestResourceInstanceObject_encode(t *testing.T) { + value := cty.ObjectVal(map[string]cty.Value{ + "foo": cty.True, + }) + // The in-memory order of resource dependencies is random, since they're an + // unordered set. 
+ depsOne := []addrs.ConfigResource{ + addrs.RootModule.Resource(addrs.ManagedResourceMode, "test", "honk"), + addrs.RootModule.Child("child").Resource(addrs.ManagedResourceMode, "test", "flub"), + addrs.RootModule.Resource(addrs.ManagedResourceMode, "test", "boop"), + } + depsTwo := []addrs.ConfigResource{ + addrs.RootModule.Child("child").Resource(addrs.ManagedResourceMode, "test", "flub"), + addrs.RootModule.Resource(addrs.ManagedResourceMode, "test", "boop"), + addrs.RootModule.Resource(addrs.ManagedResourceMode, "test", "honk"), + } + + // multiple instances may have been assigned the same deps slice + objs := []*ResourceInstanceObject{ + &ResourceInstanceObject{ + Value: value, + Status: ObjectPlanned, + Dependencies: depsOne, + }, + &ResourceInstanceObject{ + Value: value, + Status: ObjectPlanned, + Dependencies: depsTwo, + }, + &ResourceInstanceObject{ + Value: value, + Status: ObjectPlanned, + Dependencies: depsOne, + }, + &ResourceInstanceObject{ + Value: value, + Status: ObjectPlanned, + Dependencies: depsOne, + }, + } + + var encoded []*ResourceInstanceObjectSrc + + // Encoding can happen concurrently, so we need to make sure the shared + // Dependencies are safely handled + var wg sync.WaitGroup + var mu sync.Mutex + + for _, obj := range objs { + obj := obj + wg.Add(1) + go func() { + defer wg.Done() + rios, err := obj.Encode(value.Type(), 0) + if err != nil { + t.Errorf("unexpected error: %s", err) + } + mu.Lock() + encoded = append(encoded, rios) + mu.Unlock() + }() + } + wg.Wait() + + // However, identical sets of dependencies should always be written to state + // in an identical order, so we don't do meaningless state updates on refresh. 
+ for i := 0; i < len(encoded)-1; i++ { + if diff := cmp.Diff(encoded[i].Dependencies, encoded[i+1].Dependencies); diff != "" { + t.Errorf("identical dependencies got encoded in different orders:\n%s", diff) + } + } +} diff --git a/internal/terraform/states/state.go b/internal/terraform/states/state.go index 617fdf31..79329895 100644 --- a/internal/terraform/states/state.go +++ b/internal/terraform/states/state.go @@ -1,6 +1,7 @@ package states import ( + "fmt" "sort" "github.com/zclconf/go-cty/cty" @@ -150,15 +151,27 @@ func (s *State) EnsureModule(addr addrs.ModuleInstance) *Module { return ms } -// HasResources returns true if there is at least one resource (of any mode) -// present in the receiving state. -func (s *State) HasResources() bool { +// HasManagedResourceInstanceObjects returns true if there is at least one +// resource instance object (current or deposed) associated with a managed +// resource in the receiving state. +// +// A true result would suggest that just discarding this state without first +// destroying these objects could leave "dangling" objects in remote systems, +// no longer tracked by any Terraform state. +func (s *State) HasManagedResourceInstanceObjects() bool { if s == nil { return false } for _, ms := range s.Modules { - if len(ms.Resources) > 0 { - return true + for _, rs := range ms.Resources { + if rs.Addr.Resource.Mode != addrs.ManagedResourceMode { + continue + } + for _, is := range rs.Instances { + if is.Current != nil || len(is.Deposed) != 0 { + return true + } + } } } return false @@ -186,6 +199,74 @@ func (s *State) Resources(addr addrs.ConfigResource) []*Resource { return ret } +// AllManagedResourceInstanceObjectAddrs returns a set of addresses for all of +// the leaf resource instance objects associated with managed resources that +// are tracked in this state. 
+// +// This result is the set of objects that would be effectively "forgotten" +// (like "terraform state rm") if this state were totally discarded, such as +// by deleting a workspace. This function is intended only for reporting +// context in error messages, such as when we reject deleting a "non-empty" +// workspace as detected by s.HasManagedResourceInstanceObjects. +// +// The ordering of the result is meaningless but consistent. DeposedKey will +// be NotDeposed (the zero value of DeposedKey) for any "current" objects. +// This method is guaranteed to return at least one item if +// s.HasManagedResourceInstanceObjects returns true for the same state, and +// to return a zero-length slice if it returns false. +func (s *State) AllResourceInstanceObjectAddrs() []struct { + Instance addrs.AbsResourceInstance + DeposedKey DeposedKey +} { + if s == nil { + return nil + } + + // We use an unnamed return type here just because we currently have no + // general need to return pairs of instance address and deposed key aside + // from this method, and this method itself is only of marginal value + // when producing some error messages. + // + // If that need ends up arising more in future then it might make sense to + // name this as addrs.AbsResourceInstanceObject, although that would require + // moving DeposedKey into the addrs package too. 
+ type ResourceInstanceObject = struct { + Instance addrs.AbsResourceInstance + DeposedKey DeposedKey + } + var ret []ResourceInstanceObject + + for _, ms := range s.Modules { + for _, rs := range ms.Resources { + if rs.Addr.Resource.Mode != addrs.ManagedResourceMode { + continue + } + + for instKey, is := range rs.Instances { + instAddr := rs.Addr.Instance(instKey) + if is.Current != nil { + ret = append(ret, ResourceInstanceObject{instAddr, NotDeposed}) + } + for deposedKey := range is.Deposed { + ret = append(ret, ResourceInstanceObject{instAddr, deposedKey}) + } + } + } + } + + sort.SliceStable(ret, func(i, j int) bool { + objI, objJ := ret[i], ret[j] + switch { + case !objI.Instance.Equal(objJ.Instance): + return objI.Instance.Less(objJ.Instance) + default: + return objI.DeposedKey < objJ.DeposedKey + } + }) + + return ret +} + // ResourceInstance returns the state for the resource instance with the given // address, or nil if no such resource is tracked in the state. func (s *State) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstance { @@ -296,3 +377,245 @@ func (s *State) SyncWrapper() *SyncState { state: s, } } + +// MoveAbsResource moves the given src AbsResource's current state to the new +// dst address. This will panic if the src AbsResource does not exist in state, +// or if there is already a resource at the dst address. It is the caller's +// responsibility to verify the validity of the move (for example, that the src +// and dst are compatible types). 
+func (s *State) MoveAbsResource(src, dst addrs.AbsResource) { + // verify that the src address exists and the dst address does not + rs := s.Resource(src) + if rs == nil { + panic(fmt.Sprintf("no state for src address %s", src.String())) + } + + ds := s.Resource(dst) + if ds != nil { + panic(fmt.Sprintf("dst resource %s already exists", dst.String())) + } + + ms := s.Module(src.Module) + ms.RemoveResource(src.Resource) + + // Remove the module if it is empty (and not root) after removing the + // resource. + if !ms.Addr.IsRoot() && ms.empty() { + s.RemoveModule(src.Module) + } + + // Update the address before adding it to the state + rs.Addr = dst + s.EnsureModule(dst.Module).Resources[dst.Resource.String()] = rs +} + +// MaybeMoveAbsResource moves the given src AbsResource's current state to the +// new dst address. This function will succeed if the src address exists in +// state and the dst address does not; the return value indicates whether +// or not the move occurred. This function will panic if either the src does not +// exist or the dst does exist (but not both). +func (s *State) MaybeMoveAbsResource(src, dst addrs.AbsResource) bool { + // Get the source and destination addresses from state. + rs := s.Resource(src) + ds := s.Resource(dst) + + // Normal case: the src exists in state, dst does not + if rs != nil && ds == nil { + s.MoveAbsResource(src, dst) + return true + } + + if rs == nil && ds != nil { + // The source is not in state, the destination is. This is not + // guaranteed to be idempotent since we aren't tracking exact moves, but + // it's useful information for the caller. + return false + } else { + panic("invalid move") + } +} + +// MoveAbsResourceInstance moves the given src AbsResourceInstance's current state to +// the new dst address. This will panic if the src AbsResourceInstance does not +// exist in state, or if there is already a resource at the dst address.
It is +// the caller's responsibility to verify the validity of the move (for example, +// that the src and dst are compatible types). +func (s *State) MoveAbsResourceInstance(src, dst addrs.AbsResourceInstance) { + srcInstanceState := s.ResourceInstance(src) + if srcInstanceState == nil { + panic(fmt.Sprintf("no state for src address %s", src.String())) + } + + dstInstanceState := s.ResourceInstance(dst) + if dstInstanceState != nil { + panic(fmt.Sprintf("dst resource %s already exists", dst.String())) + } + + srcResourceState := s.Resource(src.ContainingResource()) + srcProviderAddr := srcResourceState.ProviderConfig + dstResourceAddr := dst.ContainingResource() + + // Remove the source resource instance from the module's state, and then the + // module if empty. + ms := s.Module(src.Module) + ms.ForgetResourceInstanceAll(src.Resource) + if !ms.Addr.IsRoot() && ms.empty() { + s.RemoveModule(src.Module) + } + + dstModule := s.EnsureModule(dst.Module) + + // See if there is already a resource we can add this instance to. + dstResourceState := s.Resource(dstResourceAddr) + if dstResourceState == nil { + // If we're moving to an address without an index then that + // suggests the user's intent is to establish both the + // resource and the instance at the same time (since the + // address covers both). If there's an index in the + // target then allow creating the new instance here. + dstModule.SetResourceProvider( + dstResourceAddr.Resource, + srcProviderAddr, // in this case, we bring the provider along as if we were moving the whole resource + ) + dstResourceState = dstModule.Resource(dstResourceAddr.Resource) + } + + dstResourceState.Instances[dst.Resource.Key] = srcInstanceState +} + +// MaybeMoveAbsResourceInstance moves the given src AbsResourceInstance's +// current state to the new dst address. 
This function will succeed if the +// src address exists in state and the dst address does not; the return +// value indicates whether or not the move occurred. This function will panic if +// either the src does not exist or the dst does exist (but not both). +func (s *State) MaybeMoveAbsResourceInstance(src, dst addrs.AbsResourceInstance) bool { + // get the src and dst resource instances from state + rs := s.ResourceInstance(src) + ds := s.ResourceInstance(dst) + + // Normal case: the src exists in state, dst does not + if rs != nil && ds == nil { + s.MoveAbsResourceInstance(src, dst) + return true + } + + if rs == nil && ds != nil { + // The source is not in state, the destination is. This is not + // guaranteed to be idempotent since we aren't tracking exact moves, but + // it's useful information. + return false + } else { + panic("invalid move") + } +} + +// MoveModuleInstance moves the given src ModuleInstance's current state to the +// new dst address. This will panic if the src ModuleInstance does not +// exist in state, or if there is already a resource at the dst address. It is +// the caller's responsibility to verify the validity of the move. +func (s *State) MoveModuleInstance(src, dst addrs.ModuleInstance) { + if src.IsRoot() || dst.IsRoot() { + panic("cannot move to or from root module") + } + + srcMod := s.Module(src) + if srcMod == nil { + panic(fmt.Sprintf("no state for src module %s", src.String())) + } + + dstMod := s.Module(dst) + if dstMod != nil { + panic(fmt.Sprintf("dst module %s already exists in state", dst.String())) + } + + s.RemoveModule(src) + + srcMod.Addr = dst + s.EnsureModule(dst) + s.Modules[dst.String()] = srcMod + + // Update any Resource's addresses. + if srcMod.Resources != nil { + for _, r := range srcMod.Resources { + r.Addr.Module = dst + } + } + + // Update any OutputValues's addresses.
+ if srcMod.OutputValues != nil { + for _, ov := range srcMod.OutputValues { + ov.Addr.Module = dst + } + } +} + +// MaybeMoveModuleInstance moves the given src ModuleInstance's current state to +// the new dst address. This function will succeed if the src address does +// exist in state and the dst address does not; the return value indicates +// whether or not the move occurred. This function will panic if either the src +// does not exist or the dst does exist (but not both). +func (s *State) MaybeMoveModuleInstance(src, dst addrs.ModuleInstance) bool { + if src.IsRoot() || dst.IsRoot() { + panic("cannot move to or from root module") + } + + srcMod := s.Module(src) + dstMod := s.Module(dst) + + // Normal case: the src exists in state, dst does not + if srcMod != nil && dstMod == nil { + s.MoveModuleInstance(src, dst) + return true + } + + if srcMod == nil || src.IsRoot() && dstMod != nil { + // The source is not in state, the destination is. This is not + // guaranteed to be idempotent since we aren't tracking exact moves, but + // it's useful information. + return false + } else { + panic("invalid move") + } +} + +// MoveModule takes a source and destination addrs.Module address, and moves all +// state Modules which are contained by the src address to the new address. +func (s *State) MoveModule(src, dst addrs.AbsModuleCall) { + if src.Module.IsRoot() || dst.Module.IsRoot() { + panic("cannot move to or from root module") + } + + // Modules only exist as ModuleInstances in state, so we need to check each + // state Module and see if it is contained by the src address to get a full + // list of modules to move.
+ var srcMIs []*Module + for _, module := range s.Modules { + if !module.Addr.IsRoot() { + if src.Module.TargetContains(module.Addr) { + srcMIs = append(srcMIs, module) + } + } + } + + if len(srcMIs) == 0 { + panic(fmt.Sprintf("no matching module instances found for src module %s", src.String())) + } + + for _, ms := range srcMIs { + newInst := make(addrs.ModuleInstance, len(ms.Addr)) + copy(newInst, ms.Addr) + if ms.Addr.IsDeclaredByCall(src) { + // Easy case: we just need to update the last step with the new name + newInst[len(newInst)-1].Name = dst.Call.Name + } else { + // Trickier: this Module is a submodule. we need to find and update + // only that appropriate step + for s := range newInst { + if newInst[s].Name == src.Call.Name { + newInst[s].Name = dst.Call.Name + } + } + } + s.MoveModuleInstance(ms.Addr, newInst) + } +} diff --git a/internal/terraform/states/state_equal.go b/internal/terraform/states/state_equal.go index aea0c7fa..f2534599 100644 --- a/internal/terraform/states/state_equal.go +++ b/internal/terraform/states/state_equal.go @@ -34,10 +34,10 @@ func (s *State) ManagedResourcesEqual(other *State) bool { return true } if s == nil { - return !other.HasResources() + return !other.HasManagedResourceInstanceObjects() } if other == nil { - return !s.HasResources() + return !s.HasManagedResourceInstanceObjects() } // If we get here then both states are non-nil. 
diff --git a/internal/terraform/states/state_test.go b/internal/terraform/states/state_test.go index e7c2ec92..f3944217 100644 --- a/internal/terraform/states/state_test.go +++ b/internal/terraform/states/state_test.go @@ -1,6 +1,7 @@ package states import ( + "fmt" "reflect" "testing" @@ -8,6 +9,7 @@ import ( "github.com/zclconf/go-cty/cty" "github.com/camptocamp/terraboard/internal/terraform/addrs" + "github.com/camptocamp/terraboard/internal/terraform/lang/marks" ) func TestState(t *testing.T) { @@ -262,7 +264,7 @@ func TestStateDeepCopy(t *testing.T) { AttrSensitivePaths: []cty.PathValueMarks{ { Path: cty.Path{cty.GetAttrStep{Name: "woozles"}}, - Marks: cty.NewValueMarks("sensitive"), + Marks: cty.NewValueMarks(marks.Sensitive), }, }, Private: []byte("private data"), @@ -291,3 +293,716 @@ func TestStateDeepCopy(t *testing.T) { t.Fatalf("\nexpected:\n%q\ngot:\n%q\n", state, stateCopy) } } + +func TestStateHasResourceInstanceObjects(t *testing.T) { + providerConfig := addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.MustParseProviderSourceString("test/test"), + } + childModuleProviderConfig := addrs.AbsProviderConfig{ + Module: addrs.RootModule.Child("child"), + Provider: addrs.MustParseProviderSourceString("test/test"), + } + + tests := map[string]struct { + Setup func(ss *SyncState) + Want bool + }{ + "empty": { + func(ss *SyncState) {}, + false, + }, + "one current, ready object in root module": { + func(ss *SyncState) { + ss.SetResourceInstanceCurrent( + mustAbsResourceAddr("test.foo").Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: ObjectReady, + }, + providerConfig, + ) + }, + true, + }, + "one current, ready object in child module": { + func(ss *SyncState) { + ss.SetResourceInstanceCurrent( + mustAbsResourceAddr("module.child.test.foo").Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: ObjectReady, + }, + childModuleProviderConfig, + ) + }, + true, + }, 
+ "one current, tainted object in root module": { + func(ss *SyncState) { + ss.SetResourceInstanceCurrent( + mustAbsResourceAddr("test.foo").Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: ObjectTainted, + }, + providerConfig, + ) + }, + true, + }, + "one deposed, ready object in root module": { + func(ss *SyncState) { + ss.SetResourceInstanceDeposed( + mustAbsResourceAddr("test.foo").Instance(addrs.NoKey), + DeposedKey("uhoh"), + &ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: ObjectTainted, + }, + providerConfig, + ) + }, + true, + }, + "one empty resource husk in root module": { + func(ss *SyncState) { + // Current Terraform doesn't actually create resource husks + // as part of its everyday work, so this is a "should never + // happen" case but we'll test to make sure we're robust to + // it anyway, because this was a historical bug blocking + // "terraform workspace delete" and similar. + ss.SetResourceInstanceCurrent( + mustAbsResourceAddr("test.foo").Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: ObjectTainted, + }, + providerConfig, + ) + s := ss.Lock() + delete(s.Modules[""].Resources["test.foo"].Instances, addrs.NoKey) + ss.Unlock() + }, + false, + }, + "one current data resource object in root module": { + func(ss *SyncState) { + ss.SetResourceInstanceCurrent( + mustAbsResourceAddr("data.test.foo").Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: ObjectReady, + }, + providerConfig, + ) + }, + false, // data resources aren't managed resources, so they don't count + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + state := BuildState(test.Setup) + got := state.HasManagedResourceInstanceObjects() + if got != test.Want { + t.Errorf("wrong result\nstate content: (using legacy state string format; might not be comprehensive)\n%s\n\ngot: %t\nwant: %t", state, got, test.Want) + } + }) + } + 
+} + +func TestState_MoveAbsResource(t *testing.T) { + // Set up a starter state for the embedded tests, which should start from a copy of this state. + state := NewState() + rootModule := state.RootModule() + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "foo", + }.Instance(addrs.IntKey(0)), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + src := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "foo"}.Absolute(addrs.RootModuleInstance) + + t.Run("basic move", func(t *testing.T) { + s := state.DeepCopy() + dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "bar"}.Absolute(addrs.RootModuleInstance) + + s.MoveAbsResource(src, dst) + + if s.Empty() { + t.Fatal("unexpected empty state") + } + + if len(s.RootModule().Resources) != 1 { + t.Fatalf("wrong number of resources in state; expected 1, found %d", len(state.RootModule().Resources)) + } + + got := s.Resource(dst) + if got.Addr.Resource != dst.Resource { + t.Fatalf("dst resource not in state") + } + }) + + t.Run("move to new module", func(t *testing.T) { + s := state.DeepCopy() + dstModule := addrs.RootModuleInstance.Child("kinder", addrs.StringKey("one")) + dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "bar"}.Absolute(dstModule) + + s.MoveAbsResource(src, dst) + + if s.Empty() { + t.Fatal("unexpected empty state") + } + + if s.Module(dstModule) == nil { + t.Fatalf("child module %s not in state", dstModule.String()) + } + + if len(s.Module(dstModule).Resources) != 1 { + t.Fatalf("wrong number of resources in state; expected 1, found %d", len(s.Module(dstModule).Resources)) + } + + got := s.Resource(dst) + if got.Addr.Resource != dst.Resource { + t.Fatalf("dst 
resource not in state") + } + }) + + t.Run("from a child module to root", func(t *testing.T) { + s := state.DeepCopy() + srcModule := addrs.RootModuleInstance.Child("kinder", addrs.NoKey) + cm := s.EnsureModule(srcModule) + cm.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "child", + }.Instance(addrs.IntKey(0)), // Moving the AbsResource moves all instances + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + cm.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "child", + }.Instance(addrs.IntKey(1)), // Moving the AbsResource moves all instances + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + src := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "child"}.Absolute(srcModule) + dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "child"}.Absolute(addrs.RootModuleInstance) + s.MoveAbsResource(src, dst) + + if s.Empty() { + t.Fatal("unexpected empty state") + } + + // The child module should have been removed after removing its only resource + if s.Module(srcModule) != nil { + t.Fatalf("child module %s was not removed from state after mv", srcModule.String()) + } + + if len(s.RootModule().Resources) != 2 { + t.Fatalf("wrong number of resources in state; expected 2, found %d", len(s.RootModule().Resources)) + } + + if len(s.Resource(dst).Instances) != 2 { + t.Fatalf("wrong number of resource instances for dst, got %d expected 2", len(s.Resource(dst).Instances)) + } + + got := s.Resource(dst) + if got.Addr.Resource != 
dst.Resource { + t.Fatalf("dst resource not in state") + } + }) + + t.Run("module to new module", func(t *testing.T) { + s := NewState() + srcModule := addrs.RootModuleInstance.Child("kinder", addrs.StringKey("exists")) + dstModule := addrs.RootModuleInstance.Child("kinder", addrs.StringKey("new")) + cm := s.EnsureModule(srcModule) + cm.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "child", + }.Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + src := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "child"}.Absolute(srcModule) + dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "child"}.Absolute(dstModule) + s.MoveAbsResource(src, dst) + + if s.Empty() { + t.Fatal("unexpected empty state") + } + + // The child module should have been removed after removing its only resource + if s.Module(srcModule) != nil { + t.Fatalf("child module %s was not removed from state after mv", srcModule.String()) + } + + gotMod := s.Module(dstModule) + if len(gotMod.Resources) != 1 { + t.Fatalf("wrong number of resources in state; expected 1, found %d", len(gotMod.Resources)) + } + + got := s.Resource(dst) + if got.Addr.Resource != dst.Resource { + t.Fatalf("dst resource not in state") + } + }) + + t.Run("module to new module", func(t *testing.T) { + s := NewState() + srcModule := addrs.RootModuleInstance.Child("kinder", addrs.StringKey("exists")) + dstModule := addrs.RootModuleInstance.Child("kinder", addrs.StringKey("new")) + cm := s.EnsureModule(srcModule) + cm.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "child", + }.Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + Status: 
ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + src := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "child"}.Absolute(srcModule) + dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "child"}.Absolute(dstModule) + s.MoveAbsResource(src, dst) + + if s.Empty() { + t.Fatal("unexpected empty state") + } + + // The child module should have been removed after removing its only resource + if s.Module(srcModule) != nil { + t.Fatalf("child module %s was not removed from state after mv", srcModule.String()) + } + + gotMod := s.Module(dstModule) + if len(gotMod.Resources) != 1 { + t.Fatalf("wrong number of resources in state; expected 1, found %d", len(gotMod.Resources)) + } + + got := s.Resource(dst) + if got.Addr.Resource != dst.Resource { + t.Fatalf("dst resource not in state") + } + }) +} + +func TestState_MaybeMoveAbsResource(t *testing.T) { + state := NewState() + rootModule := state.RootModule() + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "foo", + }.Instance(addrs.IntKey(0)), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + src := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "foo"}.Absolute(addrs.RootModuleInstance) + dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "bar"}.Absolute(addrs.RootModuleInstance) + + // First move, success + t.Run("first move", func(t *testing.T) { + moved := state.MaybeMoveAbsResource(src, dst) + if !moved { + t.Fatal("wrong result") + } + }) + + // Trying to move a resource that doesn't exist in state to a 
resource which does exist should be a noop. + t.Run("noop", func(t *testing.T) { + moved := state.MaybeMoveAbsResource(src, dst) + if moved { + t.Fatal("wrong result") + } + }) +} + +func TestState_MoveAbsResourceInstance(t *testing.T) { + state := NewState() + rootModule := state.RootModule() + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "foo", + }.Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + // src resource from the state above + src := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "foo"}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + + t.Run("resource to resource instance", func(t *testing.T) { + s := state.DeepCopy() + // For a little extra fun, move a resource to a resource instance: test_thing.foo to test_thing.foo[1] + dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "foo"}.Instance(addrs.IntKey(1)).Absolute(addrs.RootModuleInstance) + + s.MoveAbsResourceInstance(src, dst) + + if s.Empty() { + t.Fatal("unexpected empty state") + } + + if len(s.RootModule().Resources) != 1 { + t.Fatalf("wrong number of resources in state; expected 1, found %d", len(state.RootModule().Resources)) + } + + got := s.ResourceInstance(dst) + if got == nil { + t.Fatalf("dst resource not in state") + } + }) + + t.Run("move to new module", func(t *testing.T) { + s := state.DeepCopy() + // test_thing.foo to module.kinder.test_thing.foo["baz"] + dstModule := addrs.RootModuleInstance.Child("kinder", addrs.NoKey) + dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "foo"}.Instance(addrs.IntKey(1)).Absolute(dstModule) + + s.MoveAbsResourceInstance(src, dst) + + if s.Empty() { + t.Fatal("unexpected 
empty state") + } + + if s.Module(dstModule) == nil { + t.Fatalf("child module %s not in state", dstModule.String()) + } + + if len(s.Module(dstModule).Resources) != 1 { + t.Fatalf("wrong number of resources in state; expected 1, found %d", len(s.Module(dstModule).Resources)) + } + + got := s.ResourceInstance(dst) + if got == nil { + t.Fatalf("dst resource not in state") + } + }) +} + +func TestState_MaybeMoveAbsResourceInstance(t *testing.T) { + state := NewState() + rootModule := state.RootModule() + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "foo", + }.Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + // For a little extra fun, let's go from a resource to a resource instance: test_thing.foo to test_thing.bar[1] + src := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "foo"}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "foo"}.Instance(addrs.IntKey(1)).Absolute(addrs.RootModuleInstance) + + // First move, success + t.Run("first move", func(t *testing.T) { + moved := state.MaybeMoveAbsResourceInstance(src, dst) + if !moved { + t.Fatal("wrong result") + } + got := state.ResourceInstance(dst) + if got == nil { + t.Fatal("destination resource instance not in state") + } + }) + + // Moving a resource instance that doesn't exist in state to a resource which does exist should be a noop. 
+ t.Run("noop", func(t *testing.T) { + moved := state.MaybeMoveAbsResourceInstance(src, dst) + if moved { + t.Fatal("wrong result") + } + }) +} + +func TestState_MoveModuleInstance(t *testing.T) { + state := NewState() + srcModule := addrs.RootModuleInstance.Child("kinder", addrs.NoKey) + m := state.EnsureModule(srcModule) + m.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "foo", + }.Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + dstModule := addrs.RootModuleInstance.Child("child", addrs.IntKey(3)) + state.MoveModuleInstance(srcModule, dstModule) + + // srcModule should have been removed, dstModule should exist and have one resource + if len(state.Modules) != 2 { // kinder[3] and root + t.Fatalf("wrong number of modules in state. 
Expected 2, got %d", len(state.Modules)) + } + + got := state.Module(dstModule) + if got == nil { + t.Fatal("dstModule not found") + } + + gone := state.Module(srcModule) + if gone != nil { + t.Fatal("srcModule not removed from state") + } + + r := got.Resource(mustAbsResourceAddr("test_thing.foo").Resource) + if r.Addr.Module.String() != dstModule.String() { + fmt.Println(r.Addr.Module.String()) + t.Fatal("resource address was not updated") + } + +} + +func TestState_MaybeMoveModuleInstance(t *testing.T) { + state := NewState() + src := addrs.RootModuleInstance.Child("child", addrs.StringKey("a")) + cm := state.EnsureModule(src) + cm.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "foo", + }.Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + dst := addrs.RootModuleInstance.Child("kinder", addrs.StringKey("b")) + + // First move, success + t.Run("first move", func(t *testing.T) { + moved := state.MaybeMoveModuleInstance(src, dst) + if !moved { + t.Fatal("wrong result") + } + }) + + // Second move, should be a noop + t.Run("noop", func(t *testing.T) { + moved := state.MaybeMoveModuleInstance(src, dst) + if moved { + t.Fatal("wrong result") + } + }) +} + +func TestState_MoveModule(t *testing.T) { + // For this test, add two module instances (kinder and kinder["a"]). + // MoveModule(kinder) should move both instances. + state := NewState() // starter state, should be copied by the subtests. 
+ srcModule := addrs.RootModule.Child("kinder") + m := state.EnsureModule(srcModule.UnkeyedInstanceShim()) + m.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "foo", + }.Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + moduleInstance := addrs.RootModuleInstance.Child("kinder", addrs.StringKey("a")) + mi := state.EnsureModule(moduleInstance) + mi.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "foo", + }.Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + _, mc := srcModule.Call() + src := mc.Absolute(addrs.RootModuleInstance.Child("kinder", addrs.NoKey)) + + t.Run("basic", func(t *testing.T) { + s := state.DeepCopy() + _, dstMC := addrs.RootModule.Child("child").Call() + dst := dstMC.Absolute(addrs.RootModuleInstance.Child("child", addrs.NoKey)) + s.MoveModule(src, dst) + + // srcModule should have been removed, dstModule should exist and have one resource + if len(s.Modules) != 3 { // child, child["a"] and root + t.Fatalf("wrong number of modules in state. 
Expected 3, got %d", len(s.Modules)) + } + + got := s.Module(dst.Module) + if got == nil { + t.Fatal("dstModule not found") + } + + got = s.Module(addrs.RootModuleInstance.Child("child", addrs.StringKey("a"))) + if got == nil { + t.Fatal("dstModule instance \"a\" not found") + } + + gone := s.Module(srcModule.UnkeyedInstanceShim()) + if gone != nil { + t.Fatal("srcModule not removed from state") + } + }) + + t.Run("nested modules", func(t *testing.T) { + s := state.DeepCopy() + + // add a child module to module.kinder + mi := mustParseModuleInstanceStr(`module.kinder.module.grand[1]`) + m := s.EnsureModule(mi) + m.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "foo", + }.Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + _, dstMC := addrs.RootModule.Child("child").Call() + dst := dstMC.Absolute(addrs.RootModuleInstance.Child("child", addrs.NoKey)) + s.MoveModule(src, dst) + + moved := s.Module(addrs.RootModuleInstance.Child("child", addrs.StringKey("a"))) + if moved == nil { + t.Fatal("dstModule not found") + } + + // The nested module's relative address should also have been updated + nested := s.Module(mustParseModuleInstanceStr(`module.child.module.grand[1]`)) + if nested == nil { + t.Fatal("nested child module of src wasn't moved") + } + }) +} + +func mustParseModuleInstanceStr(str string) addrs.ModuleInstance { + addr, diags := addrs.ParseModuleInstanceStr(str) + if diags.HasErrors() { + panic(diags.Err()) + } + return addr +} + +func mustAbsResourceAddr(s string) addrs.AbsResource { + addr, diags := addrs.ParseAbsResourceStr(s) + if diags.HasErrors() { + panic(diags.Err()) + } + return addr +} diff --git a/internal/terraform/states/statefile/version4.go 
b/internal/terraform/states/statefile/version4.go index c2ef3acc..aa8af3e2 100644 --- a/internal/terraform/states/statefile/version4.go +++ b/internal/terraform/states/statefile/version4.go @@ -11,6 +11,7 @@ import ( ctyjson "github.com/zclconf/go-cty/cty/json" "github.com/camptocamp/terraboard/internal/terraform/addrs" + "github.com/camptocamp/terraboard/internal/terraform/lang/marks" "github.com/camptocamp/terraboard/internal/terraform/states" "github.com/camptocamp/terraboard/internal/terraform/tfdiags" ) @@ -164,7 +165,7 @@ func prepareStateV4(sV4 *stateV4) (*File, tfdiags.Diagnostics) { for _, path := range paths { pvm = append(pvm, cty.PathValueMarks{ Path: path, - Marks: cty.NewValueMarks("sensitive"), + Marks: cty.NewValueMarks(marks.Sensitive), }) } obj.AttrSensitivePaths = pvm @@ -538,7 +539,7 @@ type instanceObjectStateV4 struct { SchemaVersion uint64 `json:"schema_version"` AttributesRaw json.RawMessage `json:"attributes,omitempty"` AttributesFlat map[string]string `json:"attributes_flat,omitempty"` - AttributeSensitivePaths json.RawMessage `json:"sensitive_attributes,omitempty,"` + AttributeSensitivePaths json.RawMessage `json:"sensitive_attributes,omitempty"` PrivateRaw []byte `json:"private,omitempty"` diff --git a/internal/terraform/states/statemgr/filesystem_lock_unix.go b/internal/terraform/states/statemgr/filesystem_lock_unix.go index 1a970945..baa991a6 100644 --- a/internal/terraform/states/statemgr/filesystem_lock_unix.go +++ b/internal/terraform/states/statemgr/filesystem_lock_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package statemgr diff --git a/internal/terraform/states/statemgr/filesystem_lock_windows.go b/internal/terraform/states/statemgr/filesystem_lock_windows.go index 91b4a2a6..e4f78b67 100644 --- a/internal/terraform/states/statemgr/filesystem_lock_windows.go +++ b/internal/terraform/states/statemgr/filesystem_lock_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package statemgr diff --git 
a/internal/terraform/states/statemgr/filesystem_test.go b/internal/terraform/states/statemgr/filesystem_test.go index c207ae33..362f4cde 100644 --- a/internal/terraform/states/statemgr/filesystem_test.go +++ b/internal/terraform/states/statemgr/filesystem_test.go @@ -185,11 +185,7 @@ func TestFilesystem_backup(t *testing.T) { func TestFilesystem_backupAndReadPath(t *testing.T) { defer testOverrideVersion(t, "1.2.3")() - workDir, err := ioutil.TempDir("", "tf") - if err != nil { - t.Fatalf("failed to create temporary directory: %s", err) - } - defer os.RemoveAll(workDir) + workDir := t.TempDir() markerOutput := addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance) diff --git a/internal/terraform/states/sync.go b/internal/terraform/states/sync.go index d4c17417..3fca95a5 100644 --- a/internal/terraform/states/sync.go +++ b/internal/terraform/states/sync.go @@ -248,60 +248,6 @@ func (s *SyncState) RemoveResourceIfEmpty(addr addrs.AbsResource) bool { return true } -// MaybeFixUpResourceInstanceAddressForCount deals with the situation where a -// resource has changed from having "count" set to not set, or vice-versa, and -// so we need to rename the zeroth instance key to no key at all, or vice-versa. -// -// Set countEnabled to true if the resource has count set in its new -// configuration, or false if it does not. -// -// The state is modified in-place if necessary, moving a resource instance -// between the two addresses. The return value is true if a change was made, -// and false otherwise. 
-func (s *SyncState) MaybeFixUpResourceInstanceAddressForCount(addr addrs.ConfigResource, countEnabled bool) bool { - s.lock.Lock() - defer s.lock.Unlock() - - // get all modules instances that may match this state - modules := s.state.ModuleInstances(addr.Module) - if len(modules) == 0 { - return false - } - - changed := false - - for _, ms := range modules { - relAddr := addr.Resource - rs := ms.Resource(relAddr) - if rs == nil { - continue - } - - huntKey := addrs.NoKey - replaceKey := addrs.InstanceKey(addrs.IntKey(0)) - if !countEnabled { - huntKey, replaceKey = replaceKey, huntKey - } - - is, exists := rs.Instances[huntKey] - if !exists { - continue - } - - if _, exists := rs.Instances[replaceKey]; exists { - // If the replacement key also exists then we'll do nothing and keep both. - continue - } - - // If we get here then we need to "rename" from hunt to replace - rs.Instances[replaceKey] = is - delete(rs.Instances, huntKey) - changed = true - } - - return changed -} - // SetResourceInstanceCurrent saves the given instance object as the current // generation of the resource instance with the given address, simultaneously // updating the recorded provider configuration address, dependencies, and @@ -533,6 +479,16 @@ func (s *SyncState) Unlock() { s.lock.Unlock() } +// Close extracts the underlying state from inside this wrapper, making the +// wrapper invalid for any future operations. +func (s *SyncState) Close() *State { + s.lock.Lock() + ret := s.state + s.state = nil // make sure future operations can't still modify it + s.lock.Unlock() + return ret +} + // maybePruneModule will remove a module from the state altogether if it is // empty, unless it's the root module which must always be present. 
// @@ -554,3 +510,45 @@ func (s *SyncState) maybePruneModule(addr addrs.ModuleInstance) { s.state.RemoveModule(addr) } } + +func (s *SyncState) MoveAbsResource(src, dst addrs.AbsResource) { + s.lock.Lock() + defer s.lock.Unlock() + + s.state.MoveAbsResource(src, dst) +} + +func (s *SyncState) MaybeMoveAbsResource(src, dst addrs.AbsResource) bool { + s.lock.Lock() + defer s.lock.Unlock() + + return s.state.MaybeMoveAbsResource(src, dst) +} + +func (s *SyncState) MoveResourceInstance(src, dst addrs.AbsResourceInstance) { + s.lock.Lock() + defer s.lock.Unlock() + + s.state.MoveAbsResourceInstance(src, dst) +} + +func (s *SyncState) MaybeMoveResourceInstance(src, dst addrs.AbsResourceInstance) bool { + s.lock.Lock() + defer s.lock.Unlock() + + return s.state.MaybeMoveAbsResourceInstance(src, dst) +} + +func (s *SyncState) MoveModuleInstance(src, dst addrs.ModuleInstance) { + s.lock.Lock() + defer s.lock.Unlock() + + s.state.MoveModuleInstance(src, dst) +} + +func (s *SyncState) MaybeMoveModuleInstance(src, dst addrs.ModuleInstance) bool { + s.lock.Lock() + defer s.lock.Unlock() + + return s.state.MaybeMoveModuleInstance(src, dst) +} diff --git a/internal/terraform/tfdiags/diagnostic.go b/internal/terraform/tfdiags/diagnostic.go index d84fa666..c241ab42 100644 --- a/internal/terraform/tfdiags/diagnostic.go +++ b/internal/terraform/tfdiags/diagnostic.go @@ -1,6 +1,8 @@ package tfdiags import ( + "fmt" + "github.com/hashicorp/hcl/v2" ) @@ -24,6 +26,20 @@ const ( Warning Severity = 'W' ) +// ToHCL converts a Severity to the equivalent HCL diagnostic severity. +func (s Severity) ToHCL() hcl.DiagnosticSeverity { + switch s { + case Warning: + return hcl.DiagWarning + case Error: + return hcl.DiagError + default: + // The above should always be exhaustive for all of the valid + // Severity values in this package. 
+ panic(fmt.Sprintf("unknown diagnostic severity %s", s)) + } +} + type Description struct { Address string Summary string diff --git a/internal/terraform/tfdiags/hcl.go b/internal/terraform/tfdiags/hcl.go index 66e3e425..ad0d8220 100644 --- a/internal/terraform/tfdiags/hcl.go +++ b/internal/terraform/tfdiags/hcl.go @@ -1,8 +1,6 @@ package tfdiags import ( - "fmt" - "github.com/hashicorp/hcl/v2" ) @@ -110,19 +108,9 @@ func (diags Diagnostics) ToHCL() hcl.Diagnostics { fromExpr := diag.FromExpr() hclDiag := &hcl.Diagnostic{ - Summary: desc.Summary, - Detail: desc.Detail, - } - - switch severity { - case Warning: - hclDiag.Severity = hcl.DiagWarning - case Error: - hclDiag.Severity = hcl.DiagError - default: - // The above should always be exhaustive for all of the valid - // Severity values in this package. - panic(fmt.Sprintf("unknown diagnostic severity %s", severity)) + Summary: desc.Summary, + Detail: desc.Detail, + Severity: severity.ToHCL(), } if source.Subject != nil { hclDiag.Subject = source.Subject.ToHCL().Ptr() diff --git a/internal/terraform/typeexpr/get_type.go b/internal/terraform/typeexpr/get_type.go index de5465b9..726326ad 100644 --- a/internal/terraform/typeexpr/get_type.go +++ b/internal/terraform/typeexpr/get_type.go @@ -77,13 +77,20 @@ func getType(expr hcl.Expression, constraint bool) (cty.Type, hcl.Diagnostics) { } switch call.Name { - case "bool", "string", "number", "any": + case "bool", "string", "number": return cty.DynamicPseudoType, hcl.Diagnostics{{ Severity: hcl.DiagError, Summary: invalidTypeSummary, Detail: fmt.Sprintf("Primitive type keyword %q does not expect arguments.", call.Name), Subject: &call.ArgsRange, }} + case "any": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: fmt.Sprintf("Type constraint keyword %q does not expect arguments.", call.Name), + Subject: &call.ArgsRange, + }} } if len(call.Arguments) != 1 { diff --git 
a/internal/terraform/typeexpr/get_type_test.go b/internal/terraform/typeexpr/get_type_test.go new file mode 100644 index 00000000..e46dca3f --- /dev/null +++ b/internal/terraform/typeexpr/get_type_test.go @@ -0,0 +1,403 @@ +package typeexpr + +import ( + "fmt" + "testing" + + "github.com/hashicorp/hcl/v2/gohcl" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/hashicorp/hcl/v2/json" + "github.com/zclconf/go-cty/cty" +) + +func TestGetType(t *testing.T) { + tests := []struct { + Source string + Constraint bool + Want cty.Type + WantError string + }{ + // keywords + { + `bool`, + false, + cty.Bool, + "", + }, + { + `number`, + false, + cty.Number, + "", + }, + { + `string`, + false, + cty.String, + "", + }, + { + `any`, + false, + cty.DynamicPseudoType, + `The keyword "any" cannot be used in this type specification: an exact type is required.`, + }, + { + `any`, + true, + cty.DynamicPseudoType, + "", + }, + { + `list`, + false, + cty.DynamicPseudoType, + "The list type constructor requires one argument specifying the element type.", + }, + { + `map`, + false, + cty.DynamicPseudoType, + "The map type constructor requires one argument specifying the element type.", + }, + { + `set`, + false, + cty.DynamicPseudoType, + "The set type constructor requires one argument specifying the element type.", + }, + { + `object`, + false, + cty.DynamicPseudoType, + "The object type constructor requires one argument specifying the attribute types and values as a map.", + }, + { + `tuple`, + false, + cty.DynamicPseudoType, + "The tuple type constructor requires one argument specifying the element types as a list.", + }, + + // constructors + { + `bool()`, + false, + cty.DynamicPseudoType, + `Primitive type keyword "bool" does not expect arguments.`, + }, + { + `number()`, + false, + cty.DynamicPseudoType, + `Primitive type keyword "number" does not expect arguments.`, + }, + { + `string()`, + false, + cty.DynamicPseudoType, + `Primitive type 
keyword "string" does not expect arguments.`, + }, + { + `any()`, + false, + cty.DynamicPseudoType, + `Type constraint keyword "any" does not expect arguments.`, + }, + { + `any()`, + true, + cty.DynamicPseudoType, + `Type constraint keyword "any" does not expect arguments.`, + }, + { + `list(string)`, + false, + cty.List(cty.String), + ``, + }, + { + `set(string)`, + false, + cty.Set(cty.String), + ``, + }, + { + `map(string)`, + false, + cty.Map(cty.String), + ``, + }, + { + `list()`, + false, + cty.DynamicPseudoType, + `The list type constructor requires one argument specifying the element type.`, + }, + { + `list(string, string)`, + false, + cty.DynamicPseudoType, + `The list type constructor requires one argument specifying the element type.`, + }, + { + `list(any)`, + false, + cty.List(cty.DynamicPseudoType), + `The keyword "any" cannot be used in this type specification: an exact type is required.`, + }, + { + `list(any)`, + true, + cty.List(cty.DynamicPseudoType), + ``, + }, + { + `object({})`, + false, + cty.EmptyObject, + ``, + }, + { + `object({name=string})`, + false, + cty.Object(map[string]cty.Type{"name": cty.String}), + ``, + }, + { + `object({"name"=string})`, + false, + cty.EmptyObject, + `Object constructor map keys must be attribute names.`, + }, + { + `object({name=nope})`, + false, + cty.Object(map[string]cty.Type{"name": cty.DynamicPseudoType}), + `The keyword "nope" is not a valid type specification.`, + }, + { + `object()`, + false, + cty.DynamicPseudoType, + `The object type constructor requires one argument specifying the attribute types and values as a map.`, + }, + { + `object(string)`, + false, + cty.DynamicPseudoType, + `Object type constructor requires a map whose keys are attribute names and whose values are the corresponding attribute types.`, + }, + { + `tuple([])`, + false, + cty.EmptyTuple, + ``, + }, + { + `tuple([string, bool])`, + false, + cty.Tuple([]cty.Type{cty.String, cty.Bool}), + ``, + }, + { + `tuple([nope])`, + false, 
+ cty.Tuple([]cty.Type{cty.DynamicPseudoType}), + `The keyword "nope" is not a valid type specification.`, + }, + { + `tuple()`, + false, + cty.DynamicPseudoType, + `The tuple type constructor requires one argument specifying the element types as a list.`, + }, + { + `tuple(string)`, + false, + cty.DynamicPseudoType, + `Tuple type constructor requires a list of element types.`, + }, + { + `shwoop(string)`, + false, + cty.DynamicPseudoType, + `Keyword "shwoop" is not a valid type constructor.`, + }, + { + `list("string")`, + false, + cty.List(cty.DynamicPseudoType), + `A type specification is either a primitive type keyword (bool, number, string) or a complex type constructor call, like list(string).`, + }, + + // More interesting combinations + { + `list(object({}))`, + false, + cty.List(cty.EmptyObject), + ``, + }, + { + `list(map(tuple([])))`, + false, + cty.List(cty.Map(cty.EmptyTuple)), + ``, + }, + + // Optional modifier + { + `object({name=string,age=optional(number)})`, + true, + cty.ObjectWithOptionalAttrs(map[string]cty.Type{ + "name": cty.String, + "age": cty.Number, + }, []string{"age"}), + ``, + }, + { + `object({name=string,meta=optional(any)})`, + true, + cty.ObjectWithOptionalAttrs(map[string]cty.Type{ + "name": cty.String, + "meta": cty.DynamicPseudoType, + }, []string{"meta"}), + ``, + }, + { + `object({name=string,age=optional(number)})`, + false, + cty.Object(map[string]cty.Type{ + "name": cty.String, + "age": cty.Number, + }), + `Optional attribute modifier is only for type constraints, not for exact types.`, + }, + { + `object({name=string,meta=optional(any)})`, + false, + cty.Object(map[string]cty.Type{ + "name": cty.String, + "meta": cty.DynamicPseudoType, + }), + `Optional attribute modifier is only for type constraints, not for exact types.`, + }, + { + `optional(string)`, + false, + cty.DynamicPseudoType, + `Keyword "optional" is valid only as a modifier for object type attributes.`, + }, + { + `optional`, + false, + cty.DynamicPseudoType, 
+ `The keyword "optional" is not a valid type specification.`, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%s (constraint=%v)", test.Source, test.Constraint), func(t *testing.T) { + expr, diags := hclsyntax.ParseExpression([]byte(test.Source), "", hcl.Pos{Line: 1, Column: 1}) + if diags.HasErrors() { + t.Fatalf("failed to parse: %s", diags) + } + + got, diags := getType(expr, test.Constraint) + if test.WantError == "" { + for _, diag := range diags { + t.Error(diag) + } + } else { + found := false + for _, diag := range diags { + t.Log(diag) + if diag.Severity == hcl.DiagError && diag.Detail == test.WantError { + found = true + } + } + if !found { + t.Errorf("missing expected error detail message: %s", test.WantError) + } + } + + if !got.Equals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestGetTypeJSON(t *testing.T) { + // We have fewer test cases here because we're mainly exercising the + // extra indirection in the JSON syntax package, which ultimately calls + // into the native syntax parser (which we tested extensively in + // TestGetType). 
+ tests := []struct { + Source string + Constraint bool + Want cty.Type + WantError string + }{ + { + `{"expr":"bool"}`, + false, + cty.Bool, + "", + }, + { + `{"expr":"list(bool)"}`, + false, + cty.List(cty.Bool), + "", + }, + { + `{"expr":"list"}`, + false, + cty.DynamicPseudoType, + "The list type constructor requires one argument specifying the element type.", + }, + } + + for _, test := range tests { + t.Run(test.Source, func(t *testing.T) { + file, diags := json.Parse([]byte(test.Source), "") + if diags.HasErrors() { + t.Fatalf("failed to parse: %s", diags) + } + + type TestContent struct { + Expr hcl.Expression `hcl:"expr"` + } + var content TestContent + diags = gohcl.DecodeBody(file.Body, nil, &content) + if diags.HasErrors() { + t.Fatalf("failed to decode: %s", diags) + } + + got, diags := getType(content.Expr, test.Constraint) + if test.WantError == "" { + for _, diag := range diags { + t.Error(diag) + } + } else { + found := false + for _, diag := range diags { + t.Log(diag) + if diag.Severity == hcl.DiagError && diag.Detail == test.WantError { + found = true + } + } + if !found { + t.Errorf("missing expected error detail message: %s", test.WantError) + } + } + + if !got.Equals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} diff --git a/internal/terraform/typeexpr/type_string_test.go b/internal/terraform/typeexpr/type_string_test.go new file mode 100644 index 00000000..fbdf3f48 --- /dev/null +++ b/internal/terraform/typeexpr/type_string_test.go @@ -0,0 +1,100 @@ +package typeexpr + +import ( + "testing" + + "github.com/zclconf/go-cty/cty" +) + +func TestTypeString(t *testing.T) { + tests := []struct { + Type cty.Type + Want string + }{ + { + cty.DynamicPseudoType, + "any", + }, + { + cty.String, + "string", + }, + { + cty.Number, + "number", + }, + { + cty.Bool, + "bool", + }, + { + cty.List(cty.Number), + "list(number)", + }, + { + cty.Set(cty.Bool), + "set(bool)", + }, + { + cty.Map(cty.String), + 
"map(string)", + }, + { + cty.EmptyObject, + "object({})", + }, + { + cty.Object(map[string]cty.Type{"foo": cty.Bool}), + "object({foo=bool})", + }, + { + cty.Object(map[string]cty.Type{"foo": cty.Bool, "bar": cty.String}), + "object({bar=string,foo=bool})", + }, + { + cty.EmptyTuple, + "tuple([])", + }, + { + cty.Tuple([]cty.Type{cty.Bool}), + "tuple([bool])", + }, + { + cty.Tuple([]cty.Type{cty.Bool, cty.String}), + "tuple([bool,string])", + }, + { + cty.List(cty.DynamicPseudoType), + "list(any)", + }, + { + cty.Tuple([]cty.Type{cty.DynamicPseudoType}), + "tuple([any])", + }, + { + cty.Object(map[string]cty.Type{"foo": cty.DynamicPseudoType}), + "object({foo=any})", + }, + { + // We don't expect to find attributes that aren't valid identifiers + // because we only promise to support types that this package + // would've created, but we allow this situation during rendering + // just because it's convenient for applications trying to produce + // error messages about mismatched types. Note that the quoted + // attribute name is not actually accepted by our Type and + // TypeConstraint functions, so this is one situation where the + // TypeString result cannot be re-parsed by those functions. 
+ cty.Object(map[string]cty.Type{"foo bar baz": cty.String}), + `object({"foo bar baz"=string})`, + }, + } + + for _, test := range tests { + t.Run(test.Type.GoString(), func(t *testing.T) { + got := TypeString(test.Type) + if got != test.Want { + t.Errorf("wrong result\ntype: %#v\ngot: %s\nwant: %s", test.Type, got, test.Want) + } + }) + } +} diff --git a/internal/terraform/typeexpr/type_type_test.go b/internal/terraform/typeexpr/type_type_test.go new file mode 100644 index 00000000..2286a2e1 --- /dev/null +++ b/internal/terraform/typeexpr/type_type_test.go @@ -0,0 +1,118 @@ +package typeexpr + +import ( + "fmt" + "testing" + + "github.com/zclconf/go-cty/cty" +) + +func TestTypeConstraintType(t *testing.T) { + tyVal1 := TypeConstraintVal(cty.String) + tyVal2 := TypeConstraintVal(cty.String) + tyVal3 := TypeConstraintVal(cty.Number) + + if !tyVal1.RawEquals(tyVal2) { + t.Errorf("tyVal1 not equal to tyVal2\ntyVal1: %#v\ntyVal2: %#v", tyVal1, tyVal2) + } + if tyVal1.RawEquals(tyVal3) { + t.Errorf("tyVal1 equal to tyVal2, but should not be\ntyVal1: %#v\ntyVal3: %#v", tyVal1, tyVal3) + } + + if got, want := TypeConstraintFromVal(tyVal1), cty.String; !got.Equals(want) { + t.Errorf("wrong type extracted from tyVal1\ngot: %#v\nwant: %#v", got, want) + } + if got, want := TypeConstraintFromVal(tyVal3), cty.Number; !got.Equals(want) { + t.Errorf("wrong type extracted from tyVal3\ngot: %#v\nwant: %#v", got, want) + } +} + +func TestConvertFunc(t *testing.T) { + // This is testing the convert function directly, skipping over the HCL + // parsing and evaluation steps that would normally lead there. There is + // another test in the "integrationtest" package called TestTypeConvertFunc + // that exercises the full path to this function via the hclsyntax parser. 
+ + tests := []struct { + val, ty cty.Value + want cty.Value + wantErr string + }{ + // The goal here is not an exhaustive set of conversions, since that's + // already covered in cty/convert, but rather exercising different + // permutations of success and failure to make sure the function + // handles all of the results in a reasonable way. + { + cty.StringVal("hello"), + TypeConstraintVal(cty.String), + cty.StringVal("hello"), + ``, + }, + { + cty.True, + TypeConstraintVal(cty.String), + cty.StringVal("true"), + ``, + }, + { + cty.StringVal("hello"), + TypeConstraintVal(cty.Bool), + cty.NilVal, + `a bool is required`, + }, + { + cty.UnknownVal(cty.Bool), + TypeConstraintVal(cty.Bool), + cty.UnknownVal(cty.Bool), + ``, + }, + { + cty.DynamicVal, + TypeConstraintVal(cty.Bool), + cty.UnknownVal(cty.Bool), + ``, + }, + { + cty.NullVal(cty.Bool), + TypeConstraintVal(cty.Bool), + cty.NullVal(cty.Bool), + ``, + }, + { + cty.NullVal(cty.DynamicPseudoType), + TypeConstraintVal(cty.Bool), + cty.NullVal(cty.Bool), + ``, + }, + { + cty.StringVal("hello").Mark(1), + TypeConstraintVal(cty.String), + cty.StringVal("hello").Mark(1), + ``, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%#v to %#v", test.val, test.ty), func(t *testing.T) { + got, err := ConvertFunc.Call([]cty.Value{test.val, test.ty}) + + if err != nil { + if test.wantErr != "" { + if got, want := err.Error(), test.wantErr; got != want { + t.Errorf("wrong error\ngot: %s\nwant: %s", got, want) + } + } else { + t.Errorf("unexpected error\ngot: %s\nwant: ", err) + } + return + } + if test.wantErr != "" { + t.Errorf("wrong error\ngot: \nwant: %s", test.wantErr) + } + + if !test.want.RawEquals(got) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.want) + } + }) + } +} diff --git a/test/multiple-minio-buckets/config.yml b/test/multiple-minio-buckets/config.yml index 608f5252..82176ee0 100644 --- a/test/multiple-minio-buckets/config.yml +++ b/test/multiple-minio-buckets/config.yml @@ -4,6 +4,7 
@@ log: database: password: mypassword + sslmode: disable provider: no-locks: true