Skip to content

Commit

Permalink
Automatically create the hash data directory when using an alternative (#2611)

Browse files Browse the repository at this point in the history

* Automatically create the hash data directory when using an alternative

* Remove sync.Once from struct

* Remove unnecessary struct element

* Fix test

* Disable XAttr testing in CI

* Fix path joining, and test --delete-destination (#2620)

* Automatically create the hash data directory when using an alternative

* Fix path joining, and test --delete-destination

* Create share test fix

* Backport fixes from NewE2E early exit changes

* Remove sync.Once struct from hidden file adapter

* Prevent crash in blob

* Use my braincells
  • Loading branch information
adreed-msft committed May 9, 2024
1 parent d323e0b commit 8aec227
Show file tree
Hide file tree
Showing 16 changed files with 396 additions and 72 deletions.
96 changes: 48 additions & 48 deletions cmd/syncProcessor.go
Original file line number Diff line number Diff line change
Expand Up @@ -247,7 +247,7 @@ func newSyncDeleteProcessor(cca *cookedSyncCmdArgs, fpo common.FolderPropertyOpt
if err != nil {
return nil, err
}

return newInteractiveDeleteProcessor(deleter.delete, cca.deleteDestination, cca.fromTo.To().String(), cca.destination, cca.incrementDeletionCount, cca.dryrunMode), nil
}

Expand Down Expand Up @@ -284,7 +284,7 @@ func (b *remoteResourceDeleter) getObjectURL(objectURL string) (*url.URL, error)
if err != nil {
return nil, err
}
return u,nil
return u, nil
}

func (b *remoteResourceDeleter) delete(object StoredObject) error {
Expand All @@ -305,12 +305,12 @@ func (b *remoteResourceDeleter) delete(object StoredObject) error {

var err error
var objURL *url.URL

switch b.targetLocation {
case common.ELocation.Blob():
bsc, _ := sc.BlobServiceClient()
var blobClient *blob.Client = bsc.NewContainerClient(b.containerName).NewBlobClient(path.Join(b.rootPath + object.relativePath))
var blobClient *blob.Client = bsc.NewContainerClient(b.containerName).NewBlobClient(path.Join(b.rootPath, object.relativePath))

objURL, err = b.getObjectURL(blobClient.URL())
if err != nil {
break
Expand All @@ -321,7 +321,7 @@ func (b *remoteResourceDeleter) delete(object StoredObject) error {
_, err = blobClient.Delete(b.ctx, nil)
case common.ELocation.File():
fsc, _ := sc.FileServiceClient()
fileClient := fsc.NewShareClient(b.containerName).NewRootDirectoryClient().NewFileClient(path.Join(b.rootPath + object.relativePath))
fileClient := fsc.NewShareClient(b.containerName).NewRootDirectoryClient().NewFileClient(path.Join(b.rootPath, object.relativePath))

objURL, err = b.getObjectURL(fileClient.URL())
if err != nil {
Expand All @@ -330,13 +330,13 @@ func (b *remoteResourceDeleter) delete(object StoredObject) error {
b.folderManager.RecordChildExists(objURL)
defer b.folderManager.RecordChildDeleted(objURL)

err = common.DoWithOverrideReadOnlyOnAzureFiles(b.ctx, func()(interface{}, error) {
err = common.DoWithOverrideReadOnlyOnAzureFiles(b.ctx, func() (interface{}, error) {
return fileClient.Delete(b.ctx, nil)
}, fileClient, b.forceIfReadOnly)
case common.ELocation.BlobFS():
dsc, _ := sc.DatalakeServiceClient()
fileClient := dsc.NewFileSystemClient(b.containerName).NewFileClient(path.Join(b.rootPath + object.relativePath))
fileClient := dsc.NewFileSystemClient(b.containerName).NewFileClient(path.Join(b.rootPath, object.relativePath))

objURL, err = b.getObjectURL(fileClient.DFSURL())
if err != nil {
break
Expand Down Expand Up @@ -369,48 +369,48 @@ func (b *remoteResourceDeleter) delete(object StoredObject) error {
var objURL *url.URL
var err error
switch b.targetLocation {
case common.ELocation.Blob():
bsc, _ := sc.BlobServiceClient()
blobClient := bsc.NewContainerClient(b.containerName).NewBlobClient(path.Join(b.rootPath + object.relativePath))
// HNS endpoint doesn't like delete snapshots on a directory
objURL, err = b.getObjectURL(blobClient.URL())
if err != nil {
return err
}
case common.ELocation.Blob():
bsc, _ := sc.BlobServiceClient()
blobClient := bsc.NewContainerClient(b.containerName).NewBlobClient(path.Join(b.rootPath, object.relativePath))
// HNS endpoint doesn't like delete snapshots on a directory
objURL, err = b.getObjectURL(blobClient.URL())
if err != nil {
return err
}

deleteFunc = func(ctx context.Context, logger common.ILogger) bool {
_, err = blobClient.Delete(b.ctx, nil)
return (err == nil)
}
case common.ELocation.File():
fsc, _ := sc.FileServiceClient()
dirClient := fsc.NewShareClient(b.containerName).NewDirectoryClient(path.Join(b.rootPath + object.relativePath))
objURL, err = b.getObjectURL(dirClient.URL())
if err != nil {
return err
}
deleteFunc = func(ctx context.Context, logger common.ILogger) bool {
_, err = blobClient.Delete(b.ctx, nil)
return (err == nil)
}
case common.ELocation.File():
fsc, _ := sc.FileServiceClient()
dirClient := fsc.NewShareClient(b.containerName).NewDirectoryClient(path.Join(b.rootPath, object.relativePath))
objURL, err = b.getObjectURL(dirClient.URL())
if err != nil {
return err
}

deleteFunc = func(ctx context.Context, logger common.ILogger) bool {
err = common.DoWithOverrideReadOnlyOnAzureFiles(b.ctx, func()(interface{}, error) {
return dirClient.Delete(b.ctx, nil)
}, dirClient, b.forceIfReadOnly)
return (err == nil)
}
case common.ELocation.BlobFS():
dsc, _ := sc.DatalakeServiceClient()
directoryClient := dsc.NewFileSystemClient(b.containerName).NewDirectoryClient(path.Join(b.rootPath + object.relativePath))
objURL, err = b.getObjectURL(directoryClient.DFSURL())
if err != nil {
return err
}
deleteFunc = func(ctx context.Context, logger common.ILogger) bool {
err = common.DoWithOverrideReadOnlyOnAzureFiles(b.ctx, func() (interface{}, error) {
return dirClient.Delete(b.ctx, nil)
}, dirClient, b.forceIfReadOnly)
return (err == nil)
}
case common.ELocation.BlobFS():
dsc, _ := sc.DatalakeServiceClient()
directoryClient := dsc.NewFileSystemClient(b.containerName).NewDirectoryClient(path.Join(b.rootPath, object.relativePath))
objURL, err = b.getObjectURL(directoryClient.DFSURL())
if err != nil {
return err
}

deleteFunc = func(ctx context.Context, logger common.ILogger) bool {
recursiveContext := common.WithRecursive(b.ctx, false)
_, err = directoryClient.Delete(recursiveContext, nil)
return (err == nil)
}
default:
panic("not implemented, check your code")
deleteFunc = func(ctx context.Context, logger common.ILogger) bool {
recursiveContext := common.WithRecursive(b.ctx, false)
_, err = directoryClient.Delete(recursiveContext, nil)
return (err == nil)
}
default:
panic("not implemented, check your code")
}

b.folderManager.RecordChildExists(objURL)
Expand Down
4 changes: 2 additions & 2 deletions cmd/zt_make_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -158,7 +158,7 @@ func TestMakeFileShareQuota(t *testing.T) {

args := rawMakeCmdArgs{
resourceToCreate: scSAS.String(),
quota: 5,
quota: 5,
}

runMakeAndVerify(args, func(err error) {
Expand Down Expand Up @@ -191,4 +191,4 @@ func TestMakeFileShareExists(t *testing.T) {
_, err = sc.GetProperties(ctx, nil)
a.Nil(err)
})
}
}
4 changes: 3 additions & 1 deletion common/hash_data.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,19 +22,21 @@ var LocalHashStorageMode = EHashStorageMode.Default()
var LocalHashDir = ""

var hashDataFailureLogOnce = &sync.Once{}

// LogHashStorageFailure warns the user, at most once per process (guarded by
// hashDataFailureLogOnce), that one or more hash storage read/write operations
// have failed, directing them to the scanning log for details.
func LogHashStorageFailure() {
hashDataFailureLogOnce.Do(func() {
lcm.Info("One or more hash storage operations (read/write) have failed. Check the scanning log for details.")
})
}

type HashStorageMode uint8

var EHashStorageMode = HashStorageMode(0)

func (HashStorageMode) HiddenFiles() HashStorageMode { return 0 }

func (e *HashStorageMode) Default() HashStorageMode {
if defaulter, ok := any(e).(interface{osDefault() HashStorageMode}); ok { // allow specific OSes to override the default functionality
if defaulter, ok := any(e).(interface{ osDefault() HashStorageMode }); ok { // allow specific OSes to override the default functionality
return defaulter.osDefault()
}

Expand Down
10 changes: 8 additions & 2 deletions common/hash_data_adapter_hidden_files.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ type HiddenFileDataAdapter struct {
}

func (a *HiddenFileDataAdapter) GetMode() HashStorageMode {
return EHashStorageMode.Default()
return EHashStorageMode.HiddenFiles()
}

func (a *HiddenFileDataAdapter) getHashPath(relativePath string) string {
Expand All @@ -26,6 +26,12 @@ func (a *HiddenFileDataAdapter) getHashPath(relativePath string) string {
dir, fName := filepath.Split(relativePath)
fName = fmt.Sprintf(".%s%s", fName, AzCopyHashDataStream)

// Try to create the directory
err := os.Mkdir(filepath.Join(basePath, dir), 0775)
if err != nil && !os.IsExist(err) {
lcm.Warn("Failed to create hash data directory")
}

return filepath.Join(basePath, dir, fName)
}

Expand Down Expand Up @@ -76,7 +82,7 @@ func (a *HiddenFileDataAdapter) SetHashData(relativePath string, data *SyncHashD
}

// Push types around to check for OS-specific hide file method
if adapter, canHide := any(a).(interface{HideFile(string) error}); canHide {
if adapter, canHide := any(a).(interface{ HideFile(string) error }); canHide {
dataFile := a.getDataPath(relativePath)

err := adapter.HideFile(dataFile)
Expand Down
52 changes: 47 additions & 5 deletions e2etest/newe2e_asserter.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,13 @@ import (
"testing"
)

var _ Asserter = &FrameworkAsserter{}
var _ ScenarioAsserter = &ScenarioVariationManager{} // covers all 3 interfaces

// ====== Asserter ======

type Asserter interface {
NoError(comment string, err error)
NoError(comment string, err error, failNow ...bool)
// Assert fails the test, but does not exit.
Assert(comment string, assertion Assertion, items ...any)
// AssertNow wraps Assert, and exits if failed.
Expand All @@ -23,6 +26,8 @@ type Asserter interface {

// Failed returns if the test has already failed.
Failed() bool
// HelperMarker returns the associated *testing.T, and if there is none, a NilHelperMarker.
HelperMarker() HelperMarker
}

type DryrunAsserter interface {
Expand All @@ -39,6 +44,15 @@ type ScenarioAsserter interface {
Cleanup(func(a ScenarioAsserter))
}

// HelperMarker handles the fact that testing.T can be sometimes nil, and that we can't indicate a depth to ignore with Helper()
type HelperMarker interface {
Helper()
}

// NilHelperMarker is the stand-in HelperMarker used when no *testing.T is available.
type NilHelperMarker struct{}

// Helper is a no-op; it satisfies HelperMarker in the absence of a testing.T.
func (NilHelperMarker) Helper() {}

// ====== Assertion ======

type Assertion interface {
Expand Down Expand Up @@ -118,15 +132,35 @@ func (ta *FrameworkAsserter) Log(format string, a ...any) {
ta.t.Log(fmt.Sprintf(format, a...))
}

func (ta *FrameworkAsserter) NoError(comment string, err error) {
func (ta *FrameworkAsserter) NoError(comment string, err error, failNow ...bool) {
ta.t.Helper()
ta.AssertNow(comment, IsNil{}, err)

if err != nil {
ta.t.Logf("Error was not nil (%s): %v", comment, err)

if FirstOrZero(failNow) {
ta.t.FailNow()
} else {
ta.t.Fail()
}
}
}

func (ta *FrameworkAsserter) AssertNow(comment string, assertion Assertion, items ...any) {
ta.t.Helper()
ta.Assert(comment, assertion, items...)
if ta.Failed() {

if (assertion.MinArgs() > 0 && len(items) < assertion.MinArgs()) || (assertion.MaxArgs() > 0 && len(items) > assertion.MaxArgs()) {
ta.PrintFinalizingMessage("Failed to assert: Assertion %s supports argument counts between %d and %d, but received %d args.", assertion.Name(), assertion.MinArgs(), assertion.MaxArgs(), len(items))
ta.t.FailNow()
}

if !assertion.Assert(items...) {
if fa, ok := assertion.(FormattedAssertion); ok {
ta.PrintFinalizingMessage("Failed assertion %s: %s; %s", fa.Name(), fa.Format(items...), comment)
} else {
ta.PrintFinalizingMessage("Failed assertion %s with item(s): %v; %s", assertion.Name(), items, comment)
}

ta.t.FailNow()
}
}
Expand Down Expand Up @@ -165,3 +199,11 @@ func (ta *FrameworkAsserter) Failed() bool {
ta.t.Helper()
return ta.t.Failed()
}

// HelperMarker returns the underlying *testing.T when one is attached;
// otherwise it returns a NilHelperMarker, whose Helper method does nothing.
func (ta *FrameworkAsserter) HelperMarker() HelperMarker {
if ta.t != nil {
return ta.t
}

return NilHelperMarker{}
}
7 changes: 6 additions & 1 deletion e2etest/newe2e_object_content.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ package e2etest

import (
"bytes"
"crypto/md5"
"github.com/Azure/azure-storage-azcopy/v10/cmd"
"github.com/Azure/azure-storage-azcopy/v10/common"
"io"
Expand All @@ -11,7 +12,7 @@ import (
type ObjectContentContainer interface {
Size() int64
Reader() io.ReadSeeker
//MD5() [md5.Size]byte
MD5() [md5.Size]byte
//CRC64() uint64
}

Expand Down Expand Up @@ -48,3 +49,7 @@ func (o *ObjectContentContainerBuffer) Size() int64 {
// Reader returns a new io.ReadSeeker over the buffered data; each call yields
// an independent reader positioned at the start of o.Data.
func (o *ObjectContentContainerBuffer) Reader() io.ReadSeeker {
return bytes.NewReader(o.Data)
}

// MD5 computes and returns the MD5 digest of the buffered data on each call.
func (o *ObjectContentContainerBuffer) MD5() [md5.Size]byte {
return md5.Sum(o.Data)
}
4 changes: 3 additions & 1 deletion e2etest/newe2e_resource_manager_interface.go
Original file line number Diff line number Diff line change
Expand Up @@ -239,7 +239,9 @@ type ObjectResourceManager interface {
EntityType() common.EntityType
ContainerName() string
ObjectName() string
// Create attempts to create an object. Should overwrite objects if they already exist. It is expected to attempt to track object creation.
// Create attempts to create an object. Should overwrite objects if they already exist.
// It is expected to attempt to track object creation.
// It is also expected to create parents, if required.
Create(a Asserter, body ObjectContentContainer, properties ObjectProperties)
// Delete attempts to delete an object. NotFound type errors are ignored.
Delete(a Asserter)
Expand Down
6 changes: 4 additions & 2 deletions e2etest/newe2e_resource_managers_blob.go
Original file line number Diff line number Diff line change
Expand Up @@ -684,8 +684,10 @@ func (b *BlobObjectResourceManager) Download(a Asserter) io.ReadSeeker {
a.NoError("Download stream", err)

buf := &bytes.Buffer{}
_, err = io.Copy(buf, resp.Body)
a.NoError("Read body", err)
if err == nil && resp.Body != nil {
_, err = io.Copy(buf, resp.Body)
a.NoError("Read body", err)
}

return bytes.NewReader(buf.Bytes())
}
Expand Down

0 comments on commit 8aec227

Please sign in to comment.