Fix lll lint violation, update golangci version (#1686)

serprex committed May 1, 2024
1 parent 5b8d94f commit 71239fe
Showing 14 changed files with 63 additions and 61 deletions.
3 changes: 2 additions & 1 deletion .github/workflows/golangci-lint.yml
@@ -24,5 +24,6 @@ jobs:
- name: golangci-lint
uses: golangci/golangci-lint-action@v4.0.0
with:
skip-pkg-cache: true
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
version: v1.52.2
version: v1.57
9 changes: 5 additions & 4 deletions .golangci.yml
@@ -28,8 +28,7 @@ linters-settings:
lll:
line-length: 140
staticcheck:
# SA1019 is disabled until WAL-G has migrated from the deprecated package golang.org/x/crypto/openpgp
checks: [ "all", "-SA1019" ]
checks: ["all", "-SA1019"]
revive:
rules:
- name: blank-imports
@@ -84,12 +83,14 @@ run:
go: "1.20"
timeout: 5m
modules-download-mode: readonly
skip-dirs:

issues:
exclude-dirs:
- "tests_func"
- "tmp"
- "submodules"
- "testtools"
skip-files:
exclude-files:
- ".*_test.go$"

severity:
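Context for the `.golangci.yml` hunk: golangci-lint v1.57 deprecated the top-level `run.skip-dirs` and `run.skip-files` keys in favor of `issues.exclude-dirs` and `issues.exclude-files`, so the exclusion lists move under a new `issues:` section with their contents unchanged. The `lll` linter named in the commit title enforces the `line-length: 140` setting shown above; the Go hunks below rewrap call sites that exceeded it.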
2 changes: 1 addition & 1 deletion Makefile
@@ -16,7 +16,7 @@ TEST := "pg_tests"
MYSQL_TEST := "mysql_base_tests"
MONGO_MAJOR ?= "4.2"
MONGO_VERSION ?= "4.2.8"
GOLANGCI_LINT_VERSION ?= "v1.52.2"
GOLANGCI_LINT_VERSION ?= "v1.57"
REDIS_VERSION ?= "6.2.4"
TOOLS_MOD_DIR := ./internal/tools

12 changes: 6 additions & 6 deletions cmd/gp/backup_fetch.go
@@ -18,7 +18,7 @@ const (
restorePointDescription = "Fetch storage backup w/ restore point specified by name"
restorePointTSDescription = "Fetch storage backup w/ restore point time less or equal to the provided timestamp"
restoreConfigPathDescription = "Path to the cluster restore configuration"
fetchContentIdsDescription = "If set, WAL-G will fetch only the specified segments"
fetchContentIDsDescription = "If set, WAL-G will fetch only the specified segments"
fetchModeDescription = "Backup fetch mode. default: do the backup unpacking " +
"and prepare the configs [unpack+prepare], unpack: backup unpacking only, prepare: config preparation only."
inPlaceFlagDescription = "Perform the backup fetch in-place (without the restore config)"
@@ -30,7 +30,7 @@ var fetchTargetUserData string
var restorePointTS string
var restorePoint string
var restoreConfigPath string
var fetchContentIds *[]int
var fetchContentIDs *[]int
var fetchModeStr string
var inPlaceRestore bool
var partialRestoreArgs []string
@@ -68,15 +68,15 @@ var backupFetchCmd = &cobra.Command{

logsDir := viper.GetString(conf.GPLogsDirectory)

if len(*fetchContentIds) > 0 {
tracelog.InfoLogger.Printf("Will perform fetch operations only on the specified segments: %v", *fetchContentIds)
if len(*fetchContentIDs) > 0 {
tracelog.InfoLogger.Printf("Will perform fetch operations only on the specified segments: %v", *fetchContentIDs)
}

fetchMode, err := greenplum.NewBackupFetchMode(fetchModeStr)
tracelog.ErrorLogger.FatalOnError(err)

internal.HandleBackupFetch(storage.RootFolder(), targetBackupSelector,
greenplum.NewGreenplumBackupFetcher(restoreConfigPath, inPlaceRestore, logsDir, *fetchContentIds, fetchMode, restorePoint,
greenplum.NewGreenplumBackupFetcher(restoreConfigPath, inPlaceRestore, logsDir, *fetchContentIDs, fetchMode, restorePoint,
partialRestoreArgs))
},
}
@@ -113,7 +113,7 @@ func init() {
backupFetchCmd.Flags().StringVar(&restoreConfigPath, "restore-config",
"", restoreConfigPathDescription)
backupFetchCmd.Flags().BoolVar(&inPlaceRestore, "in-place", false, inPlaceFlagDescription)
fetchContentIds = backupFetchCmd.Flags().IntSlice("content-ids", []int{}, fetchContentIdsDescription)
fetchContentIDs = backupFetchCmd.Flags().IntSlice("content-ids", []int{}, fetchContentIDsDescription)
backupFetchCmd.Flags().StringSliceVar(&partialRestoreArgs, "restore-only", nil, restoreOnlyDescription)

backupFetchCmd.Flags().StringVar(&fetchModeStr, "mode", "default", fetchModeDescription)
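Most of the churn in this commit is the `Ids` → `IDs` rename. Go convention keeps initialisms such as ID, URL, and API fully capitalized in identifiers, and linters commonly flag the mixed-case form (e.g. staticcheck's ST1003 or revive's var-naming rule — which check fired here is not stated in the diff). A minimal sketch with hypothetical names:

```go
package main

import "fmt"

// A name like fetchContentIds would be flagged: "Ids" mixes the case of
// the ID initialism. The conventional spelling keeps it uppercase throughout.
var fetchContentIDs = []int{0, 2}

func main() {
	fmt.Println(fetchContentIDs) // [0 2]
}
```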
13 changes: 7 additions & 6 deletions cmd/mongo/binary_backup_fetch.go
@@ -20,8 +20,8 @@ const (
RsNameDescription = "Name of replicaset (like rs01)"
RsMembersFlag = "mongo-rs-members"
RsMembersDescription = "Comma separated host:port records from wished rs members (like rs.initiate())"
RsMemberIdsFlag = "mongo-rs-member-ids"
RsMemberIdsDescription = "Comma separated integers for replica IDs of corresponding --mongo-rs-members"
RsMemberIDsFlag = "mongo-rs-member-ids"
RsMemberIDsDescription = "Comma separated integers for replica IDs of corresponding --mongo-rs-members"
ShNameFlag = "mongo-sh-name"
ShNameDescription = "Name of shard"
ShCfgConnStr = "mongo-cfg-conn-str"
@@ -41,7 +41,7 @@ var (
minimalConfigPath = ""
rsName = ""
rsMembers []string
rsMemberIds []int
rsMemberIDs []int
shardName = ""
mongocfgConnectionString = ""
shardConnectionStrings []string
@@ -65,8 +65,9 @@ var binaryBackupFetchCmd = &cobra.Command{
mongodConfigPath := args[1]
mongodVersion := args[2]

err := mongo.HandleBinaryFetchPush(ctx, mongodConfigPath, minimalConfigPath, backupName, mongodVersion, rsName,
rsMembers, rsMemberIds, shardName, mongocfgConnectionString, shardConnectionStrings, skipBackupDownloadFlag, skipMongoReconfigFlag, skipCheckFlag)
err := mongo.HandleBinaryFetchPush(ctx, mongodConfigPath, minimalConfigPath, backupName, mongodVersion,
rsName, rsMembers, rsMemberIDs, shardName, mongocfgConnectionString, shardConnectionStrings,
skipBackupDownloadFlag, skipMongoReconfigFlag, skipCheckFlag)
tracelog.ErrorLogger.FatalOnError(err)
},
}
@@ -75,7 +76,7 @@ func init() {
binaryBackupFetchCmd.Flags().StringVar(&minimalConfigPath, MinimalConfigPathFlag, "", MinimalConfigPathDescription)
binaryBackupFetchCmd.Flags().StringVar(&rsName, RsNameFlag, "", RsNameDescription)
binaryBackupFetchCmd.Flags().StringSliceVar(&rsMembers, RsMembersFlag, []string{}, RsMembersDescription)
binaryBackupFetchCmd.Flags().IntSliceVar(&rsMemberIds, RsMemberIdsFlag, []int{}, RsMemberIdsDescription)
binaryBackupFetchCmd.Flags().IntSliceVar(&rsMemberIDs, RsMemberIDsFlag, []int{}, RsMemberIDsDescription)
binaryBackupFetchCmd.Flags().StringVar(&shardName, ShNameFlag, "", ShNameDescription)
binaryBackupFetchCmd.Flags().StringVar(&mongocfgConnectionString, ShCfgConnStr, "", ShCfgConnStrDescription)
binaryBackupFetchCmd.Flags().StringArrayVar(&shardConnectionStrings, ShShardConnStr, []string{}, ShShardConnStrDescription)
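For reference, `IntSliceVar` comes from spf13/pflag (the flag library cobra uses) and parses comma-separated integers, which is how `--mongo-rs-member-ids 0,1,2` becomes the `rsMemberIDs` slice. A standalone sketch with a hypothetical command name:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	var memberIDs []int
	cmd := &cobra.Command{
		Use: "demo", // hypothetical command, for illustration only
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println(memberIDs) // [0 1 2]
		},
	}
	// pflag splits the comma-separated value into the slice.
	cmd.Flags().IntSliceVar(&memberIDs, "mongo-rs-member-ids", []int{}, "replica IDs")
	cmd.SetArgs([]string{"--mongo-rs-member-ids", "0,1,2"})
	_ = cmd.Execute()
}
```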
13 changes: 7 additions & 6 deletions cmd/mongo/oplog_replay.go
@@ -54,36 +54,37 @@ type oplogReplayRunArgs struct {
oplogApplicationMode *string
}

func buildOplogReplayRunArgs(cmdargs []string) (args oplogReplayRunArgs, err error) {
func buildOplogReplayRunArgs(cmdargs []string) (oplogReplayRunArgs, error) {
var args oplogReplayRunArgs
// resolve archiving settings
downloader, err := archive.NewStorageDownloader(archive.NewDefaultStorageSettings())
if err != nil {
return args, err
}
args.since, err = processArg(cmdargs[0], downloader)
if err != nil {
return
return args, err
}
args.until, err = processArg(cmdargs[1], downloader)
if err != nil {
return
return args, err
}

// TODO: fix ugly config
if ignoreErrCodesStr, ok := conf.GetSetting(conf.OplogReplayIgnoreErrorCodes); ok {
if err = json.Unmarshal([]byte(ignoreErrCodesStr), &args.ignoreErrCodes); err != nil {
return
return args, err
}
}

args.mongodbURL, err = conf.GetRequiredSetting(conf.MongoDBUriSetting)
if err != nil {
return
return args, err
}

oplogAlwaysUpsert, hasOplogAlwaysUpsert, err := conf.GetBoolSetting(conf.OplogReplayOplogAlwaysUpsert)
if err != nil {
return
return args, err
}
if hasOplogAlwaysUpsert {
args.oplogAlwaysUpsert = &oplogAlwaysUpsert
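The `buildOplogReplayRunArgs` change replaces named result parameters and bare `return` statements with explicit `return args, err`, so each exit point states exactly what it returns instead of relying on whatever the named values happen to hold. The same pattern in isolation (hypothetical function, not repo code):

```go
package main

import "errors"

// Before: named results and a bare return; the reader must track which
// values "n" and "err" hold at every return site.
func parseBefore(s string) (n int, err error) {
	if s == "" {
		err = errors.New("empty input")
		return // implicitly returns (0, err)
	}
	return len(s), nil
}

// After: every return states its values explicitly.
func parseAfter(s string) (int, error) {
	if s == "" {
		return 0, errors.New("empty input")
	}
	return len(s), nil
}

func main() {
	_, _ = parseBefore("x")
	_, _ = parseAfter("x")
}
```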
4 changes: 2 additions & 2 deletions internal/crypto/libsodium/reader.go
@@ -85,11 +85,11 @@ func (reader *Reader) Read(p []byte) (n int, err error) {
return
}

func (reader *Reader) readNextChunk() (err error) {
func (reader *Reader) readNextChunk() error {
n, err := io.ReadFull(reader.Reader, reader.in)

if err != nil && err != io.ErrUnexpectedEOF {
return
return err
}

var outLen C.ulonglong
14 changes: 7 additions & 7 deletions internal/databases/greenplum/backup_fetch_handler.go
@@ -61,7 +61,7 @@ type FetchHandler struct {
func NewFetchHandler(
backup internal.Backup, sentinel BackupSentinelDto,
segCfgMaker SegConfigMaker, logsDir string,
fetchContentIds []int, mode BackupFetchMode,
fetchContentIDs []int, mode BackupFetchMode,
restorePoint string, partialRestoreArgs []string,
) *FetchHandler {
backupIDByContentID := make(map[int]string)
@@ -91,20 +91,20 @@ func NewFetchHandler(
cluster: globalCluster,
backupIDByContentID: backupIDByContentID,
backup: backup,
contentIDsToFetch: prepareContentIDsToFetch(fetchContentIds, segmentConfigs),
contentIDsToFetch: prepareContentIDsToFetch(fetchContentIDs, segmentConfigs),
fetchMode: mode,
restorePoint: restorePoint,
partialRestoreArgs: partialRestoreArgs,
}
}

// prepareContentIDsToFetch returns a set containing the IDs of segments to be fetched
func prepareContentIDsToFetch(fetchContentIds []int, segmentConfigs []cluster.SegConfig) map[int]bool {
func prepareContentIDsToFetch(fetchContentIDs []int, segmentConfigs []cluster.SegConfig) map[int]bool {
contentIDsToFetch := make(map[int]bool)

// if user set the specific content IDs, use only them, otherwise fetch all
if len(fetchContentIds) > 0 {
for _, id := range fetchContentIds {
if len(fetchContentIDs) > 0 {
for _, id := range fetchContentIDs {
contentIDsToFetch[id] = true
}
} else {
@@ -263,7 +263,7 @@ func (fh *FetchHandler) buildFetchCommand(contentID int) string {
}

func NewGreenplumBackupFetcher(restoreCfgPath string, inPlaceRestore bool, logsDir string,
fetchContentIds []int, mode BackupFetchMode, restorePoint string, partialRestoreArgs []string,
fetchContentIDs []int, mode BackupFetchMode, restorePoint string, partialRestoreArgs []string,
) func(folder storage.Folder, backup internal.Backup) {
return func(folder storage.Folder, backup internal.Backup) {
tracelog.InfoLogger.Printf("Starting backup-fetch for %s", backup.Name)
@@ -277,7 +277,7 @@ func NewGreenplumBackupFetcher(restoreCfgPath string, inPlaceRestore bool, logsD
segCfgMaker, err := NewSegConfigMaker(restoreCfgPath, inPlaceRestore)
tracelog.ErrorLogger.FatalOnError(err)

err = NewFetchHandler(backup, sentinel, segCfgMaker, logsDir, fetchContentIds, mode, restorePoint, partialRestoreArgs).Fetch()
err = NewFetchHandler(backup, sentinel, segCfgMaker, logsDir, fetchContentIDs, mode, restorePoint, partialRestoreArgs).Fetch()
tracelog.ErrorLogger.FatalOnError(err)
}
}
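`prepareContentIDsToFetch` uses Go's standard set idiom: a `map[int]bool` whose `true` entries mark membership, with lookups on missing keys yielding the zero value `false`. The same pattern in isolation, with hypothetical IDs:

```go
package main

import "fmt"

func main() {
	requested := []int{2, 5}
	toFetch := make(map[int]bool)
	for _, id := range requested {
		toFetch[id] = true
	}
	for _, id := range []int{1, 2, 3, 4, 5} {
		if toFetch[id] { // missing keys read as false
			fmt.Printf("fetching segment %d\n", id)
		}
	}
}
```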
12 changes: 6 additions & 6 deletions internal/databases/greenplum/backup_push_handler.go
@@ -98,7 +98,7 @@ type CurrBackupInfo struct {
type PrevBackupInfo struct {
name string
sentinelDto BackupSentinelDto
deltaBaseBackupIds map[int]string
deltaBaseBackupIDs map[int]string
}

// BackupHandler is the main struct which is handling the backup process
@@ -156,7 +156,7 @@ func (bh *BackupHandler) addSegmentDeltaBaseArg(contentID int, args []string) []
return args
}

backupID, ok := bh.prevBackupInfo.deltaBaseBackupIds[contentID]
backupID, ok := bh.prevBackupInfo.deltaBaseBackupIDs[contentID]
if !ok {
tracelog.WarningLogger.Printf(
"unable to find the requested contentID %d in metadata of the base backup %s, "+
@@ -724,18 +724,18 @@ func (bh *BackupHandler) configureDeltaBackup() (err error) {
return err
}

bh.loadDeltaBaseBackupIds()
bh.loadDeltaBaseBackupIDs()

return nil
}

func (bh *BackupHandler) loadDeltaBaseBackupIds() {
bh.prevBackupInfo.deltaBaseBackupIds = make(map[int]string)
func (bh *BackupHandler) loadDeltaBaseBackupIDs() {
bh.prevBackupInfo.deltaBaseBackupIDs = make(map[int]string)

for i := range bh.prevBackupInfo.sentinelDto.Segments {
backupID := bh.prevBackupInfo.sentinelDto.Segments[i].BackupID
contentID := bh.prevBackupInfo.sentinelDto.Segments[i].ContentID
bh.prevBackupInfo.deltaBaseBackupIds[contentID] = backupID
bh.prevBackupInfo.deltaBaseBackupIDs[contentID] = backupID
}
}

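`loadDeltaBaseBackupIDs` builds the contentID → backupID index once so that `addSegmentDeltaBaseArg` can use the comma-ok map lookup to tell a missing content ID apart from an empty string. The lookup pattern in isolation, with hypothetical data:

```go
package main

import "fmt"

func main() {
	deltaBaseBackupIDs := map[int]string{-1: "backup_master", 0: "backup_seg0"}
	// The comma-ok form distinguishes an absent key from a zero value.
	if backupID, ok := deltaBaseBackupIDs[1]; ok {
		fmt.Println("delta base for contentID 1:", backupID)
	} else {
		fmt.Println("contentID 1 missing from base backup metadata")
	}
}
```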
24 changes: 12 additions & 12 deletions internal/databases/mongo/binary/mongod.go
@@ -301,7 +301,7 @@ func makeBsonRsMembers(rsConfig RsConfig) bson.A {
bsonMembers := bson.A{}

for i := 0; i != len(rsConfig.RsMembers); i++ {
bsonMembers = append(bsonMembers, bson.M{"_id": rsConfig.RsMemberIds[i], "host": rsConfig.RsMembers[i]})
bsonMembers = append(bsonMembers, bson.M{"_id": rsConfig.RsMemberIDs[i], "host": rsConfig.RsMembers[i]})
}

return bsonMembers
@@ -346,15 +346,15 @@ type RestoreArgs struct {
BackupName string
RestoreVersion string

SkipBackupDownload bool
SkipChecks bool
SkipMongoReconfig bool
SkipBackupDownload bool
SkipChecks bool
SkipMongoReconfig bool
}

type RsConfig struct {
RsName string
RsMembers []string
RsMemberIds []int
RsMemberIDs []int
}

type ShConfig struct {
@@ -366,17 +366,17 @@ type MongoCfgConfig struct {
Shards map[string]string
}

func NewRsConfig(rsName string, rsMembers []string, rsMemberIds []int) RsConfig {
if len(rsMemberIds) == 0 {
rsMemberIds = make([]int, len(rsMembers))
func NewRsConfig(rsName string, rsMembers []string, rsMemberIDs []int) RsConfig {
if len(rsMemberIDs) == 0 {
rsMemberIDs = make([]int, len(rsMembers))
for i := 0; i < len(rsMembers); i++ {
rsMemberIds[i] = i
rsMemberIDs[i] = i
}
}
return RsConfig{
RsName: rsName,
RsMembers: rsMembers,
RsMemberIds: rsMemberIds,
RsMemberIDs: rsMemberIDs,
}
}

@@ -417,10 +417,10 @@ func (rsConfig RsConfig) Validate() error {
if rsConfig.RsName == "" && len(rsConfig.RsMembers) > 0 || rsConfig.RsName != "" && len(rsConfig.RsMembers) == 0 {
return errors.Errorf("rsConfig should be all empty or full populated, but rsConfig = %+v", rsConfig)
}
if len(rsConfig.RsMembers) > len(rsConfig.RsMemberIds) {
if len(rsConfig.RsMembers) > len(rsConfig.RsMemberIDs) {
return errors.Errorf("not all replica set members have corresponding ID")
}
if len(rsConfig.RsMembers) < len(rsConfig.RsMemberIds) {
if len(rsConfig.RsMembers) < len(rsConfig.RsMemberIDs) {
return errors.Errorf("excessive number of replica set IDs")
}
return nil
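`NewRsConfig` defaults the member IDs to positional indices (0, 1, 2, …) when none are given, and `Validate` then requires the members and IDs slices to match in length. A runnable sketch using trimmed-down copies of the definitions from this diff:

```go
package main

import "fmt"

// Trimmed-down copies of RsConfig and NewRsConfig from this diff.
type RsConfig struct {
	RsName      string
	RsMembers   []string
	RsMemberIDs []int
}

func NewRsConfig(rsName string, rsMembers []string, rsMemberIDs []int) RsConfig {
	if len(rsMemberIDs) == 0 {
		rsMemberIDs = make([]int, len(rsMembers))
		for i := 0; i < len(rsMembers); i++ {
			rsMemberIDs[i] = i
		}
	}
	return RsConfig{RsName: rsName, RsMembers: rsMembers, RsMemberIDs: rsMemberIDs}
}

func main() {
	// No IDs supplied: each member gets its slice index as _id.
	cfg := NewRsConfig("rs01", []string{"host1:27018", "host2:27018"}, nil)
	fmt.Println(cfg.RsMemberIDs) // [0 1]
}
```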
6 changes: 3 additions & 3 deletions internal/databases/mongo/binary/mongod_test.go
@@ -11,7 +11,7 @@ func TestMakeBsonRsMembers(t *testing.T) {
assert.Equal(t, bson.A{}, makeBsonRsMembers(RsConfig{}))
assert.Equal(t, bson.A{bson.M{"_id": 0, "host": "localhost:1234"}}, makeBsonRsMembers(RsConfig{
RsMembers: []string{"localhost:1234"},
RsMemberIds: []int{0},
RsMemberIDs: []int{0},
}))
assert.Equal(t,
bson.A{
@@ -22,7 +22,7 @@
makeBsonRsMembers(RsConfig{
RsName: "",
RsMembers: []string{"localhost:1234", "localhost:5678", "remotehost:9876"},
RsMemberIds: []int{0, 1, 2},
RsMemberIDs: []int{0, 1, 2},
}))
assert.Equal(t,
bson.A{
@@ -33,6 +33,6 @@
makeBsonRsMembers(RsConfig{
RsName: "",
RsMembers: []string{"localhost:1234", "localhost:5678", "remotehost:9876"},
RsMemberIds: []int{4, 5, 0},
RsMemberIDs: []int{4, 5, 0},
}))
}
4 changes: 2 additions & 2 deletions internal/databases/mongo/binary_backup_fetch_handler.go
@@ -13,7 +13,7 @@ func HandleBinaryFetchPush(
ctx context.Context,
mongodConfigPath, minimalConfigPath, backupName, restoreMongodVersion, rsName string,
rsMembers []string,
rsMemberIds []int,
rsMemberIDs []int,
shardName, mongoCfgConnectionString string,
shardConnectionStrings []string,
skipBackupDownload, skipReconfig, skipChecks bool,
@@ -43,7 +43,7 @@ func HandleBinaryFetchPush(
return err
}

rsConfig := binary.NewRsConfig(rsName, rsMembers, rsMemberIds)
rsConfig := binary.NewRsConfig(rsName, rsMembers, rsMemberIDs)
if err = rsConfig.Validate(); err != nil {
return err
}
