command/mv: fix option usage (#338)
* Some of the command options were not working in the move command because we forgot to pass the cli.Context flags to the Copy struct. Use the generic NewCopy constructor for both copy and move.
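With the shared constructor, flags that mv previously ignored (for example --exclude and --raw, both covered by the updated help text and the new e2e test below) are now read from the cli.Context the same way cp reads them. A quick illustration, with bucket and file names as placeholders:

> s5cmd mv --exclude "*.txt" --exclude "*.gz" dir/ s3://bucket
> s5cmd mv --raw 'a*.txt' s3://bucket/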
sonmezonur committed Aug 12, 2021
1 parent fb03bce commit d5e5ea3
Showing 3 changed files with 117 additions and 62 deletions.
82 changes: 44 additions & 38 deletions command/cp.go
@@ -72,28 +72,28 @@ Examples:
> s5cmd {{.HelpName}} -n -s -u s3://bucket/source-prefix/* s3://bucket/target-prefix/
12. Perform KMS Server Side Encryption of the object(s) at the destination
> s5cmd {{.HelpName}} --sse aws:kms s3://bucket/object s3://target-bucket/prefix/object
13. Perform KMS-SSE of the object(s) at the destination using customer managed Customer Master Key (CMK) key id
> s5cmd {{.HelpName}} --sse aws:kms --sse-kms-key-id <your-kms-key-id> s3://bucket/object s3://target-bucket/prefix/object
14. Force transfer of GLACIER objects with a prefix whether they are restored or not
> s5cmd {{.HelpName}} --force-glacier-transfer s3://bucket/prefix/* target-directory/
15. Upload a file to S3 bucket with public read s3 acl
> s5cmd {{.HelpName}} --acl "public-read" myfile.gz s3://bucket/
16. Upload a file to S3 bucket with expires header
> s5cmd {{.HelpName}} --expires "2024-10-01T20:30:00Z" myfile.gz s3://bucket/
17. Upload a file to S3 bucket with cache-control header
> s5cmd {{.HelpName}} --cache-control "public, max-age=345600" myfile.gz s3://bucket/
18. Copy all files to S3 bucket but exclude the ones with txt and gz extension
> s5cmd {{.HelpName}} --exclude "*.txt" --exclude "*.gz" dir/ s3://bucket
19. Copy all files from S3 bucket to another S3 bucket but exclude the ones starting with log
> s5cmd {{.HelpName}} --exclude "log*" s3://bucket/* s3://destbucket
`

func NewCopyCommandFlags() []cli.Flag {
@@ -198,35 +198,8 @@ func NewCopyCommand() *cli.Command {
Action: func(c *cli.Context) (err error) {
defer stat.Collect(c.Command.FullName(), &err)()

return Copy{
src: c.Args().Get(0),
dst: c.Args().Get(1),
op: c.Command.Name,
fullCommand: givenCommand(c),
deleteSource: false, // don't delete source
// flags
noClobber: c.Bool("no-clobber"),
ifSizeDiffer: c.Bool("if-size-differ"),
ifSourceNewer: c.Bool("if-source-newer"),
flatten: c.Bool("flatten"),
followSymlinks: !c.Bool("no-follow-symlinks"),
storageClass: storage.StorageClass(c.String("storage-class")),
concurrency: c.Int("concurrency"),
partSize: c.Int64("part-size") * megabytes,
encryptionMethod: c.String("sse"),
encryptionKeyID: c.String("sse-kms-key-id"),
acl: c.String("acl"),
forceGlacierTransfer: c.Bool("force-glacier-transfer"),
exclude: c.StringSlice("exclude"),
raw: c.Bool("raw"),
cacheControl: c.String("cache-control"),
expires: c.String("expires"),
// region settings
srcRegion: c.String("source-region"),
dstRegion: c.String("destination-region"),

storageOpts: NewStorageOpts(c),
}.Run(c.Context)
// don't delete source
return NewCopy(c, false).Run(c.Context)
},
}
}
@@ -266,6 +239,39 @@ type Copy struct {
storageOpts storage.Options
}

// NewCopy creates Copy from cli.Context.
func NewCopy(c *cli.Context, deleteSource bool) Copy {
return Copy{
src: c.Args().Get(0),
dst: c.Args().Get(1),
op: c.Command.Name,
fullCommand: givenCommand(c),
deleteSource: deleteSource,
// flags
noClobber: c.Bool("no-clobber"),
ifSizeDiffer: c.Bool("if-size-differ"),
ifSourceNewer: c.Bool("if-source-newer"),
flatten: c.Bool("flatten"),
followSymlinks: !c.Bool("no-follow-symlinks"),
storageClass: storage.StorageClass(c.String("storage-class")),
concurrency: c.Int("concurrency"),
partSize: c.Int64("part-size") * megabytes,
encryptionMethod: c.String("sse"),
encryptionKeyID: c.String("sse-kms-key-id"),
acl: c.String("acl"),
forceGlacierTransfer: c.Bool("force-glacier-transfer"),
exclude: c.StringSlice("exclude"),
raw: c.Bool("raw"),
cacheControl: c.String("cache-control"),
expires: c.String("expires"),
// region settings
srcRegion: c.String("source-region"),
dstRegion: c.String("destination-region"),

storageOpts: NewStorageOpts(c),
}
}

const fdlimitWarning = `
WARNING: s5cmd is hitting the max open file limit allowed by your OS. Either
increase the open file limit or try to decrease the number of workers with
32 changes: 8 additions & 24 deletions command/mv.go
@@ -2,7 +2,6 @@ package command

import (
"github.com/peak/s5cmd/log/stat"
"github.com/peak/s5cmd/storage"

"github.com/urfave/cli/v2"
)
@@ -31,6 +30,12 @@ Examples:
5. Move a directory to S3 bucket recursively
> s5cmd {{.HelpName}} dir/ s3://bucket/
6. Move all files to S3 bucket but exclude the ones with txt and gz extension
> s5cmd {{.HelpName}} --exclude "*.txt" --exclude "*.gz" dir/ s3://bucket
7. Move all files from S3 bucket to another S3 bucket but exclude the ones starting with log
> s5cmd {{.HelpName}} --exclude "log*" s3://bucket/* s3://destbucket
`

func NewMoveCommand() *cli.Command {
@@ -46,29 +51,8 @@ func NewMoveCommand() *cli.Command {
Action: func(c *cli.Context) (err error) {
defer stat.Collect(c.Command.FullName(), &err)()

copyCommand := Copy{
src: c.Args().Get(0),
dst: c.Args().Get(1),
op: c.Command.Name,
fullCommand: givenCommand(c),
deleteSource: true, // delete source
// flags
noClobber: c.Bool("no-clobber"),
ifSizeDiffer: c.Bool("if-size-differ"),
ifSourceNewer: c.Bool("if-source-newer"),
flatten: c.Bool("flatten"),
followSymlinks: !c.Bool("no-follow-symlinks"),
storageClass: storage.StorageClass(c.String("storage-class")),
encryptionMethod: c.String("sse"),
encryptionKeyID: c.String("sse-kms-key-id"),
acl: c.String("acl"),
cacheControl: c.String("cache-control"),
expires: c.String("expires"),

storageOpts: NewStorageOpts(c),
}

return copyCommand.Run(c.Context)
// delete source
return NewCopy(c, true).Run(c.Context)
},
}
}
65 changes: 65 additions & 0 deletions e2e/mv_test.go
@@ -3,6 +3,7 @@ package e2e
import (
"fmt"
"path/filepath"
"runtime"
"testing"

"gotest.tools/v3/assert"
@@ -360,3 +361,67 @@ func TestMoveMultipleS3ObjectsToS3DryRun(t *testing.T) {
assertError(t, err, errS3NoSuchKey)
}
}

// mv --raw file s3://bucket/
func TestMoveLocalObjectToS3WithRawFlag(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip()
}

t.Parallel()

bucket := s3BucketFromTestName(t)

s3client, s5cmd, cleanup := setup(t)
defer cleanup()

createBucket(t, s3client, bucket)

objectsToMove := []fs.PathOp{
fs.WithFile("a*.txt", "content"),
}

otherObjects := []fs.PathOp{
fs.WithDir(
"a*b",
fs.WithFile("file.txt", "content"),
),

fs.WithFile("abc.txt", "content"),
}

folderLayout := append(objectsToMove, otherObjects...)

workdir := fs.NewDir(t, t.Name(), folderLayout...)
defer workdir.Remove()

srcpath := filepath.ToSlash(workdir.Join("a*.txt"))
dstpath := fmt.Sprintf("s3://%v", bucket)

cmd := s5cmd("mv", "--raw", srcpath, dstpath)
result := icmd.RunCmd(cmd)

result.Assert(t, icmd.Success)

assertLines(t, result.Stdout(), map[int]compareFunc{
0: equals("mv %v %v/a*.txt", srcpath, dstpath),
}, sortInput(true))

expectedObjects := []string{"a*.txt"}
for _, obj := range expectedObjects {
err := ensureS3Object(s3client, bucket, obj, "content")
if err != nil {
t.Fatalf("Object %s is not in S3\n", obj)
}
}

nonExpectedObjects := []string{"a*b/file.txt", "abc.txt"}
for _, obj := range nonExpectedObjects {
err := ensureS3Object(s3client, bucket, obj, "content")
assertError(t, err, errS3NoSuchKey)
}

// assert local filesystem
expected := fs.Expected(t, otherObjects...)
assert.Assert(t, fs.Equal(workdir.Path(), expected))
}
