diff --git a/awss3/store.go b/awss3/store.go index f71a8d5..da3497d 100644 --- a/awss3/store.go +++ b/awss3/store.go @@ -10,9 +10,10 @@ import ( "sync" "time" + "context" + "github.com/araddon/gou" "github.com/pborman/uuid" - "golang.org/x/net/context" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" @@ -446,27 +447,32 @@ func (f *FS) NewWriterWithContext(ctx context.Context, objectName string, metada return nil, fmt.Errorf("options IfNotExists not supported for store type") } - // Create an uploader with the session and default options - uploader := s3manager.NewUploader(f.sess) + rwc := csbufio.NewBackgroundWriteCloser(ctx, + func(ctx context.Context, rc io.ReadCloser) error { + defer rc.Close() + + uploader := s3manager.NewUploader(f.sess) + + // TODO: this needs to be managed, ie shutdown signals, close, handler err etc. + + // Upload the file to S3. + _, err := uploader.UploadWithContext(ctx, &s3manager.UploadInput{ + Bucket: aws.String(f.bucket), + Key: aws.String(objectName), + Body: rc, + }) + if err != nil { + err = fmt.Errorf("unable to write %q to s3 bucket %q: %w", objectName, f.bucket, err) - pr, pw := io.Pipe() - bw := csbufio.NewWriter(ctx, pw) + gou.Warnf("could not upload %v", err) - go func() { - // TODO: this needs to be managed, ie shutdown signals, close, handler err etc. + return err + } - // Upload the file to S3. - _, err := uploader.UploadWithContext(ctx, &s3manager.UploadInput{ - Bucket: aws.String(f.bucket), - Key: aws.String(objectName), - Body: pr, + return nil }) - if err != nil { - gou.Warnf("could not upload %v", err) - } - }() - return bw, nil + return rwc, nil } // Delete requested object path string. 
diff --git a/azure/store.go b/azure/store.go index 6063fef..94e7690 100644 --- a/azure/store.go +++ b/azure/store.go @@ -1,7 +1,6 @@ package azure import ( - "bufio" "encoding/base64" "encoding/binary" "fmt" @@ -11,12 +10,14 @@ import ( "strings" "time" + "context" + az "github.com/Azure/azure-sdk-for-go/storage" "github.com/araddon/gou" - "github.com/lytics/cloudstorage" "github.com/pborman/uuid" - "golang.org/x/net/context" - "golang.org/x/sync/errgroup" + + "github.com/lytics/cloudstorage" + "github.com/lytics/cloudstorage/csbufio" ) const ( @@ -384,70 +385,22 @@ func (f *FS) NewWriterWithContext(ctx context.Context, name string, metadata map return nil, fmt.Errorf("options IfNotExists not supported for store type") } name = strings.Replace(name, " ", "+", -1) - o := &object{name: name, metadata: metadata} - rwc := newAzureWriteCloser(ctx, f, o) + obj := &object{name: name, metadata: metadata} - return rwc, nil -} - -// azureWriteCloser - manages data and go routines used to pipe data to azures, calling Close -// will flush data to azures and block until all inflight data has been written or -// we get an error. -type azureWriteCloser struct { - pr *io.PipeReader - pw *io.PipeWriter - wc *bufio.Writer - g *errgroup.Group -} - -// azureWriteCloser is a io.WriteCloser that manages the azure connection pipe and when Close is called -// it blocks until all data is flushed to azure via a background go routine call to uploadMultiPart. -func newAzureWriteCloser(ctx context.Context, f *FS, obj *object) io.WriteCloser { - pr, pw := io.Pipe() - bw := bufio.NewWriter(pw) - - g, _ := errgroup.WithContext(ctx) - - g.Go(func() error { - // Upload the file to azure. 
- // Do a multipart upload - err := f.uploadMultiPart(obj, pr) - if err != nil { - gou.Warnf("could not upload %v", err) - return err - } - return nil - }) + rwc := csbufio.NewBackgroundWriteCloser(ctx, + func(_ context.Context, rc io.ReadCloser) error { + err := f.uploadMultiPart(obj, rc) + if err != nil { + err = fmt.Errorf("unable to write %q to azure bucket %q: %w", obj.name, f.bucket, err) - return azureWriteCloser{ - pr, pw, bw, g, - } -} + gou.Warnf("could not upload %v", err) -// Write writes data to our write buffer, which writes to the backing io pipe. -// If an error is encountered while writting we may not see it here, my guess is -// we wouldn't see it until someone calls close and the error is returned from the -// error group. -func (bc azureWriteCloser) Write(p []byte) (nn int, err error) { - return bc.wc.Write(p) -} + return err + } + return nil + }) -// Close and block until we flush inflight data to azures -func (bc azureWriteCloser) Close() error { - //Flush buffered data to the backing pipe writer. - if err := bc.wc.Flush(); err != nil { - return err - } - //Close the pipe writer so that the pipe reader will return EOF, - // doing so will cause uploadMultiPart to complete and return. 
- if err := bc.pw.Close(); err != nil { - return err - } - //Use the error group's Wait method to block until uploadMultPart has completed - if err := bc.g.Wait(); err != nil { - return err - } - return nil + return rwc, nil } const ( diff --git a/csbufio/background_write_closer.go b/csbufio/background_write_closer.go new file mode 100644 index 0000000..c1b982f --- /dev/null +++ b/csbufio/background_write_closer.go @@ -0,0 +1,102 @@ +package csbufio + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + + "github.com/araddon/gou" + "golang.org/x/sync/errgroup" +) + +var _ io.WriteCloser = (*backgroundWriteCloser)(nil) + +// backgroundWriteCloser - manages data and goroutines used to pipe data to the cloud, calling Close +// will flush data to the cloud and block until all inflight data has been written or +// we get an error. +type backgroundWriteCloser struct { + pipeWriter io.Closer + buffioWriter *bufio.Writer + backgroundJob *errgroup.Group + done chan struct{} +} + +// NewBackgroundWriteCloser - returns an io.WriteCloser that manages the cloud connection pipe and when Close is called +// it blocks until all data is flushed to the cloud via a background goroutine running the given job function. +func NewBackgroundWriteCloser(ctx context.Context, job func(context.Context, io.ReadCloser) error) io.WriteCloser { + pipeReader, pipeWriter := io.Pipe() + buffioWriter := bufio.NewWriter(pipeWriter) + + var backgroundJob errgroup.Group + + backgroundJob.Go(func() error { + err := job(ctx, pipeReader) + if err != nil { + gou.Warnf("could not upload %v", err) + + return err + } + + return nil + }) + + done := make(chan struct{}) + + return &backgroundWriteCloser{ + pipeWriter, buffioWriter, &backgroundJob, done, + } +} + +var errAlreadyClosed = errors.New("writer already closed") + +// Write writes data to our write buffer, which writes to the backing io pipe. 
+// If an error is encountered while writing we may not see it here, my guess is +// we wouldn't see it until someone calls close and the error is returned from the +// error group. +func (bc *backgroundWriteCloser) Write(p []byte) (nn int, err error) { + select { + case <-bc.done: + return 0, errAlreadyClosed + default: + } + + return bc.buffioWriter.Write(p) +} + +// Close and block until we flush inflight data to the cloud +func (bc *backgroundWriteCloser) Close() error { + select { + case <-bc.done: + return nil + default: + close(bc.done) + } + + var errs []error + + // Flush buffered data to the backing pipe writer. + if err := bc.buffioWriter.Flush(); err != nil { + errs = append(errs, fmt.Errorf("unable to flush buffer: %w", err)) + } + + // Close the pipe writer so that the pipe reader will return EOF, + // doing so will cause the background job to complete and return. + if err := bc.pipeWriter.Close(); err != nil { + errs = append(errs, fmt.Errorf("unable to close pipe: %w", err)) + } + // Use the error group's Wait method to block until upload has completed + if err := bc.backgroundJob.Wait(); err != nil { + errs = append(errs, fmt.Errorf("error from background job: %w", err)) + } + + switch len(errs) { + case 0: + return nil + case 1: + return errs[0] + default: + return errors.Join(errs...) 
+ } +} diff --git a/csbufio/background_write_closer_test.go b/csbufio/background_write_closer_test.go new file mode 100644 index 0000000..02ba8d5 --- /dev/null +++ b/csbufio/background_write_closer_test.go @@ -0,0 +1,103 @@ +package csbufio_test + +import ( + "context" + "io" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/lytics/cloudstorage/csbufio" +) + +func TestBackgroundWriteCloser(t *testing.T) { + t.Parallel() + + testcases := []struct { + label string + mustCancelCtx bool + closeErrMsg string + }{ + { + label: "test success on write and close", + mustCancelCtx: false, + }, + { + label: "test success on write but close return error", + mustCancelCtx: true, + closeErrMsg: "error from background job: context canceled", + }, + } + + for _, tc := range testcases { + tc := tc + t.Run(tc.label, func(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + if tc.mustCancelCtx { + cancel() + } + + var dataSentToCloud []byte + wc := csbufio.NewBackgroundWriteCloser(ctx, func(ctx context.Context, rc io.ReadCloser) error { + defer rc.Close() + + data, err := io.ReadAll(rc) + if err != nil { + return err + } + + dataSentToCloud = data + + return ctx.Err() + }) + + _, err := wc.Write([]byte("foo")) + require.NoError(t, err) + + err = wc.Close() + if tc.closeErrMsg != "" { + require.EqualError(t, err, tc.closeErrMsg) + + return + } + + require.NoError(t, err) + + assert.Equal(t, []byte("foo"), dataSentToCloud) + }) + } +} + +func TestBackgroundWriteCloserCheckWriteAfterClose(t *testing.T) { + t.Parallel() + ctx := context.Background() + wc := csbufio.NewBackgroundWriteCloser(ctx, func(ctx context.Context, rc io.ReadCloser) error { + defer rc.Close() + + return nil + }) + + require.NoError(t, wc.Close()) + + _, err := wc.Write([]byte("foo")) + require.EqualError(t, err, "writer already closed") +} + +func TestBackgroundWriteCloserCheckMultipleCloses(t 
*testing.T) { + t.Parallel() + ctx := context.Background() + wc := csbufio.NewBackgroundWriteCloser(ctx, func(ctx context.Context, rc io.ReadCloser) error { + defer rc.Close() + + return nil + }) + + require.NoError(t, wc.Close()) + + require.NoError(t, wc.Close(), "close an already closed wc should not trigger an error") +} diff --git a/csbufio/reader_test.go b/csbufio/reader_test.go index 4766fb8..155d913 100644 --- a/csbufio/reader_test.go +++ b/csbufio/reader_test.go @@ -22,7 +22,7 @@ func TestReaderContextDone(t *testing.T) { n, err := rc.Read(p) require.ErrorIs(t, err, context.Canceled) require.Equal(t, 0, n) - require.Len(t, p, 0) + require.Empty(t, p) err = rc.Close() require.ErrorIs(t, err, context.Canceled) diff --git a/csbufio/writer_test.go b/csbufio/writer_test.go index a443fcd..fc62f67 100644 --- a/csbufio/writer_test.go +++ b/csbufio/writer_test.go @@ -26,7 +26,7 @@ func TestWriterContextDone(t *testing.T) { b, err := io.ReadAll(pr) require.NoError(t, err, "error reading") - require.Equal(t, 0, len(b), "") + require.Empty(t, b, "") err = wc.Close() require.ErrorIs(t, err, context.Canceled) diff --git a/doc.go b/doc.go index 6e406fd..8085757 100644 --- a/doc.go +++ b/doc.go @@ -9,8 +9,7 @@ systems. Then the methods (Query, filter, get, put) are common, as are the Files (Objects) themselves. Writing code that supports multiple backends is now simple. - -Creating and iterating files +# Creating and iterating files In this example we are going to create a local-filesystem store. 
diff --git a/go.mod b/go.mod index 90f859e..5ae7248 100644 --- a/go.mod +++ b/go.mod @@ -1,10 +1,11 @@ module github.com/lytics/cloudstorage -go 1.19 +go 1.20 require ( cloud.google.com/go/storage v1.28.0 github.com/Azure/azure-sdk-for-go v67.1.0+incompatible + github.com/acomagu/bufpipe v1.0.4 github.com/araddon/gou v0.0.0-20211019181548-e7d08105776c github.com/aws/aws-sdk-go v1.44.146 github.com/pborman/uuid v1.2.1 @@ -17,8 +18,6 @@ require ( google.golang.org/api v0.103.0 ) -require github.com/acomagu/bufpipe v1.0.4 - require ( cloud.google.com/go v0.105.0 // indirect cloud.google.com/go/compute v1.12.1 // indirect diff --git a/google/apistore.go b/google/apistore.go index ee7e894..288433f 100644 --- a/google/apistore.go +++ b/google/apistore.go @@ -1,8 +1,9 @@ package google import ( - "github.com/lytics/cloudstorage" "google.golang.org/api/storage/v1" + + "github.com/lytics/cloudstorage" ) // APIStore a google api store diff --git a/google/client.go b/google/client.go index 25856d0..3058d25 100644 --- a/google/client.go +++ b/google/client.go @@ -5,8 +5,9 @@ import ( "net/http" "os" + "context" + "cloud.google.com/go/storage" - "golang.org/x/net/context" "golang.org/x/oauth2" googleOauth2 "golang.org/x/oauth2/google" "golang.org/x/oauth2/jwt" diff --git a/google/google_test.go b/google/google_test.go index 9152396..c49a9b9 100644 --- a/google/google_test.go +++ b/google/google_test.go @@ -6,6 +6,7 @@ import ( "testing" "cloud.google.com/go/storage" + "github.com/lytics/cloudstorage" "github.com/lytics/cloudstorage/google" "github.com/lytics/cloudstorage/testutils" diff --git a/google/store.go b/google/store.go index 3d87c0d..f2cfca4 100644 --- a/google/store.go +++ b/google/store.go @@ -11,10 +11,11 @@ import ( "strings" "time" + "context" + "cloud.google.com/go/storage" "github.com/araddon/gou" "github.com/pborman/uuid" - "golang.org/x/net/context" "google.golang.org/api/iterator" "github.com/lytics/cloudstorage" diff --git a/google/storeutils/const.go 
b/google/storeutils/const.go index 7bee7d6..3b0a9e2 100644 --- a/google/storeutils/const.go +++ b/google/storeutils/const.go @@ -9,13 +9,13 @@ import ( // Copied from cloudstorage var GCSRetries = 10 -//backoff sleeps a random amount so we can. -//retry failed requests using a randomized exponential backoff: -//wait a random period between [0..1] seconds and retry; if that fails, -//wait a random period between [0..2] seconds and retry; if that fails, -//wait a random period between [0..4] seconds and retry, and so on, -//with an upper bounds to the wait period being 16 seconds. -//http://play.golang.org/p/l9aUHgiR8J +// backoff sleeps a random amount so we can +// retry failed requests using a randomized exponential backoff: +// wait a random period between [0..1] seconds and retry; if that fails, +// wait a random period between [0..2] seconds and retry; if that fails, +// wait a random period between [0..4] seconds and retry, and so on, +// with an upper bound to the wait period being 16 seconds. 
+// http://play.golang.org/p/l9aUHgiR8J func backoff(try int) { nf := math.Pow(2, float64(try)) nf = math.Max(1, nf) diff --git a/google/storeutils/get.go b/google/storeutils/get.go index 9609750..1b0c9ea 100644 --- a/google/storeutils/get.go +++ b/google/storeutils/get.go @@ -4,8 +4,9 @@ import ( "bytes" "io" + "context" + "cloud.google.com/go/storage" - "golang.org/x/net/context" "github.com/lytics/cloudstorage" ) diff --git a/google/storeutils/utils.go b/google/storeutils/utils.go index eb803d2..abdbe07 100644 --- a/google/storeutils/utils.go +++ b/google/storeutils/utils.go @@ -1,8 +1,9 @@ package storeutils import ( + "context" + "github.com/lytics/cloudstorage" - "golang.org/x/net/context" ) // GetAndOpen is a convenience method that combines Store.Get() and Object.Open() into diff --git a/google/storeutils/utils_test.go b/google/storeutils/utils_test.go index 1d7c4eb..fc8a855 100644 --- a/google/storeutils/utils_test.go +++ b/google/storeutils/utils_test.go @@ -4,8 +4,9 @@ import ( "os" "testing" + "context" + "cloud.google.com/go/storage" - "golang.org/x/net/context" "google.golang.org/api/option" "github.com/lytics/cloudstorage" diff --git a/iterator.go b/iterator.go index 863b406..e721049 100644 --- a/iterator.go +++ b/iterator.go @@ -5,7 +5,8 @@ import ( "math/rand" "time" - "golang.org/x/net/context" + "context" + "google.golang.org/api/iterator" ) diff --git a/localfs/store.go b/localfs/store.go index 1b78941..0b42482 100644 --- a/localfs/store.go +++ b/localfs/store.go @@ -13,12 +13,14 @@ import ( "syscall" "time" + "context" + "github.com/araddon/gou" - "github.com/lytics/cloudstorage" - "github.com/lytics/cloudstorage/csbufio" "github.com/pborman/uuid" - "golang.org/x/net/context" "google.golang.org/api/iterator" + + "github.com/lytics/cloudstorage" + "github.com/lytics/cloudstorage/csbufio" ) func init() { diff --git a/localfs/store_test.go b/localfs/store_test.go index 4ab7d90..cbbdf67 100644 --- a/localfs/store_test.go +++ b/localfs/store_test.go 
@@ -5,10 +5,11 @@ import ( "path/filepath" "testing" + "github.com/stretchr/testify/require" + "github.com/lytics/cloudstorage" "github.com/lytics/cloudstorage/localfs" "github.com/lytics/cloudstorage/testutils" - "github.com/stretchr/testify/require" ) func TestAll(t *testing.T) { diff --git a/registry_test.go b/registry_test.go index 1519d6b..e90dbaa 100644 --- a/registry_test.go +++ b/registry_test.go @@ -4,8 +4,9 @@ import ( "fmt" "testing" - "github.com/lytics/cloudstorage" "github.com/stretchr/testify/require" + + "github.com/lytics/cloudstorage" ) func TestRegistry(t *testing.T) { diff --git a/sftp/store.go b/sftp/store.go index 0f13fe9..8036172 100644 --- a/sftp/store.go +++ b/sftp/store.go @@ -9,11 +9,12 @@ import ( "strings" "time" + "context" + "github.com/araddon/gou" "github.com/pborman/uuid" ftp "github.com/pkg/sftp" "golang.org/x/crypto/ssh" - "golang.org/x/net/context" "github.com/lytics/cloudstorage" ) diff --git a/store.go b/store.go index 98354b8..6d1891c 100644 --- a/store.go +++ b/store.go @@ -8,8 +8,9 @@ import ( "strings" "time" + "context" + "github.com/araddon/gou" - "golang.org/x/net/context" ) const ( diff --git a/store_test.go b/store_test.go index 69b91cd..aa7b88b 100644 --- a/store_test.go +++ b/store_test.go @@ -5,9 +5,10 @@ import ( "path/filepath" "testing" + "github.com/stretchr/testify/require" + "github.com/lytics/cloudstorage" "github.com/lytics/cloudstorage/localfs" - "github.com/stretchr/testify/require" ) func TestStore(t *testing.T) { @@ -35,7 +36,7 @@ func TestStore(t *testing.T) { } store, err = cloudstorage.NewStore(localFsConf) - require.Nil(t, err) + require.NoError(t, err) require.NotNil(t, store) } @@ -59,10 +60,10 @@ func TestJwtConf(t *testing.T) { // t.Logf("b64 %q", v) conf := &cloudstorage.Config{} err := json.Unmarshal([]byte(configInput), conf) - require.Nil(t, err) + require.NoError(t, err) conf.JwtConf.PrivateKey = "------helo-------\naGVsbG8td29ybGQ=\n-----------------end--------" require.NotNil(t, 
conf.JwtConf) - require.Nil(t, conf.JwtConf.Validate()) + require.NoError(t, conf.JwtConf.Validate()) require.Equal(t, "aGVsbG8td29ybGQ=", conf.JwtConf.PrivateKey) require.Equal(t, "service_account", conf.JwtConf.Type) @@ -83,9 +84,9 @@ func TestJwtConf(t *testing.T) { }` conf = &cloudstorage.Config{} err = json.Unmarshal([]byte(configInput), conf) - require.Nil(t, err) + require.NoError(t, err) require.NotNil(t, conf.JwtConf) - require.Nil(t, conf.JwtConf.Validate()) + require.NoError(t, conf.JwtConf.Validate()) require.Equal(t, "aGVsbG8td29ybGQ=", conf.JwtConf.PrivateKey) require.Equal(t, "service_account", conf.JwtConf.Type) } diff --git a/testutils/testutils.go b/testutils/testutils.go index 171aa7b..66d149f 100644 --- a/testutils/testutils.go +++ b/testutils/testutils.go @@ -18,9 +18,10 @@ import ( "time" "github.com/araddon/gou" - "github.com/lytics/cloudstorage" "github.com/stretchr/testify/require" "google.golang.org/api/iterator" + + "github.com/lytics/cloudstorage" ) var ( @@ -324,7 +325,7 @@ func ensureContents(t *testing.T, store cloudstorage.Store, name, data, msg stri caller := caller(2) obj, err := store.Get(context.Background(), name) - require.Equalf(t, nil, err, msg, caller) + require.NoErrorf(t, err, msg, caller) if err != nil { return } @@ -334,13 +335,13 @@ func ensureContents(t *testing.T, store cloudstorage.Store, name, data, msg stri f, err := obj.Open(cloudstorage.ReadOnly) defer func() { err = obj.Close() - require.Equalf(t, nil, err, msg, caller) + require.NoErrorf(t, err, msg, caller) }() - require.Equalf(t, nil, err, msg, caller) + require.NoErrorf(t, err, msg, caller) require.Equalf(t, fmt.Sprintf("%p", f), fmt.Sprintf("%p", obj.File()), msg, caller) bytes, err := io.ReadAll(f) - require.Equalf(t, nil, err, msg, caller) + require.NoErrorf(t, err, msg, caller) require.Equalf(t, data, string(bytes), msg, caller) } @@ -361,14 +362,14 @@ func Copy(t *testing.T, store cloudstorage.Store) { obj := createFile(t, store, "from/test.csv", 
testcsv) dest, err := store.NewObject("to/testcopy.csv") - require.Equalf(t, nil, err, caller) + require.NoErrorf(t, err, caller) err = cloudstorage.Copy(context.Background(), store, obj, dest) - require.Equalf(t, nil, err, caller) + require.NoErrorf(t, err, caller) // After copy, old should exist obj2, err := store.Get(context.Background(), "from/test.csv") - require.Equalf(t, nil, err, caller) + require.NoErrorf(t, err, caller) require.Equalf(t, "from/test.csv", obj2.Name(), caller) // And also to should exist @@ -491,7 +492,7 @@ func ListObjsAndFolders(t *testing.T, store cloudstorage.Store) { createObjects := func(names []string) { for _, n := range names { obj, err := store.NewObject(n) - require.Equalf(t, nil, err, "failed trying to call new object on:%v of %v", n, names) + require.NoErrorf(t, err, "failed trying to call new object on:%v of %v", n, names) if obj == nil { continue } @@ -533,20 +534,20 @@ func ListObjsAndFolders(t *testing.T, store cloudstorage.Store) { iter, _ := store.Objects(context.Background(), q) objs, err := cloudstorage.ObjectsAll(iter) require.NoError(t, err) - require.Equal(t, 15, len(objs), "incorrect list len. wanted 15 got %d", len(objs)) + require.Len(t, objs, 15, "incorrect list len. wanted 15 got %d", len(objs)) iter.Close() iter, _ = store.Objects(context.Background(), q) objr, err := cloudstorage.ObjectResponseFromIter(iter) require.NoError(t, err) - require.Equal(t, 15, len(objr.Objects), "incorrect list len. wanted 15 got %d", len(objr.Objects)) + require.Len(t, objr.Objects, 15, "incorrect list len. wanted 15 got %d", len(objr.Objects)) // Now we are going to re-run this test using store.List() instead of store.Objects() q = cloudstorage.NewQuery("list-test/") q.Sorted() objResp, err := store.List(context.Background(), q) require.NoError(t, err) - require.Equal(t, 15, len(objResp.Objects), "incorrect list len. wanted 15 got %d", len(objResp.Objects)) + require.Len(t, objResp.Objects, 15, "incorrect list len. 
wanted 15 got %d", len(objResp.Objects)) // Now we are going to re-run this test using an Object Iterator // that uses store.List() instead of store.Objects() @@ -566,14 +567,14 @@ func ListObjsAndFolders(t *testing.T, store cloudstorage.Store) { require.Equal(t, names[i], o.Name(), "unexpected name.") i++ } - require.Equal(t, 15, len(objs), "incorrect list len. wanted 15 got %d", len(objs)) + require.Len(t, objs, 15, "incorrect list len. wanted 15 got %d", len(objs)) q = cloudstorage.NewQuery("list-test/b") q.Sorted() iter, _ = store.Objects(context.Background(), q) objs, err = cloudstorage.ObjectsAll(iter) require.NoError(t, err) - require.Equal(t, 5, len(objs), "incorrect list len. wanted 5 got %d", len(objs)) + require.Len(t, objs, 5, "incorrect list len. wanted 5 got %d", len(objs)) for i, o := range objs { require.Equal(t, names[i+5], o.Name(), "unexpected name.") @@ -594,12 +595,12 @@ func ListObjsAndFolders(t *testing.T, store cloudstorage.Store) { i++ } - require.Equal(t, 5, len(objs), "incorrect list len.") + require.Len(t, objs, 5, "incorrect list len.") q = cloudstorage.NewQueryForFolders("list-test/") folders, err = store.Folders(context.Background(), q) require.NoError(t, err) - require.Equal(t, 3, len(folders), "incorrect list len. wanted 3 folders. %v", folders) + require.Len(t, folders, 3, "incorrect list len. wanted 3 folders. %v", folders) sort.Strings(folders) require.Equal(t, []string{"list-test/a/", "list-test/b/", "list-test/c/"}, folders) @@ -620,20 +621,20 @@ func ListObjsAndFolders(t *testing.T, store cloudstorage.Store) { q.PageSize = 500 folders, err = store.Folders(context.Background(), q) require.NoError(t, err) - require.Equal(t, 3, len(folders), "incorrect list len. wanted 3 folders. %v", folders) + require.Len(t, folders, 3, "incorrect list len. wanted 3 folders. 
%v", folders) require.Equal(t, []string{"list-test/a/", "list-test/b/", "list-test/c/"}, folders) q = cloudstorage.NewQueryForFolders("list-test/b/") folders, err = store.Folders(context.Background(), q) require.NoError(t, err) - require.Equal(t, 2, len(folders), "incorrect list len. wanted 2 folders. %v", folders) + require.Len(t, folders, 2, "incorrect list len. wanted 2 folders. %v", folders) require.Equal(t, []string{"list-test/b/b1/", "list-test/b/b2/"}, folders) ctx, cancel := context.WithCancel(context.Background()) cancel() folders, err = store.Folders(ctx, q) require.Error(t, err) - require.Equal(t, 0, len(folders), "incorrect list len. wanted 0 folders. %v", folders) + require.Empty(t, folders, "incorrect list len. wanted 0 folders. %v", folders) // List objects from a missing folder q = cloudstorage.NewQuery("does-not-exist/") @@ -768,12 +769,12 @@ func TestReadWriteCloser(t *testing.T, store cloudstorage.Store) { data := fmt.Sprintf("pad:%v:pid:%v:time:%v:index:%v:", padding, os.Getpid(), time.Now().Nanosecond(), i) wc, err := store.NewWriter(fileName, nil) - require.Equalf(t, nil, err, "at loop-cnt:%v", i) + require.NoErrorf(t, err, "at loop-cnt:%v", i) buf1 := bytes.NewBufferString(data) _, err = buf1.WriteTo(wc) - require.Equalf(t, nil, err, "at loop-cnt:%v", i) + require.NoErrorf(t, err, "at loop-cnt:%v", i) err = wc.Close() - require.Equalf(t, nil, err, "at loop-cnt:%v", i) + require.NoErrorf(t, err, "at loop-cnt:%v", i) time.Sleep(time.Millisecond * 100) wc, err = store.NewWriterWithContext(context.Background(), fileName, nil, cloudstorage.Opts{IfNotExists: true}) @@ -790,18 +791,18 @@ func TestReadWriteCloser(t *testing.T, store cloudstorage.Store) { deleteIfExists(store, "prefix/test.csv") rc, err := store.NewReader(fileName) - require.Equalf(t, nil, err, "at loop-cnt:%v", i) + require.NoErrorf(t, err, "at loop-cnt:%v", i) if rc == nil { t.Fatalf("could not create reader") return } buf2 := bytes.Buffer{} _, err = buf2.ReadFrom(rc) - 
require.Equalf(t, nil, err, "at loop-cnt:%v", i) + require.NoErrorf(t, err, "at loop-cnt:%v", i) require.Equalf(t, data, buf2.String(), "round trip data don't match: loop-cnt:%v", i) // extra data means we didn't truncate the file // make sure we clean up and close - require.Nil(t, rc.Close()) + require.NoError(t, rc.Close()) _, err = store.NewReader("bogus/notreal.csv") require.Equalf(t, cloudstorage.ErrObjectNotFound, err, "at loop-cnt:%v", i) @@ -886,7 +887,7 @@ func MultipleRW(t *testing.T, store cloudstorage.Store, conf *cloudstorage.Confi require.Equal(t, fmt.Sprintf("%p", f2), fmt.Sprintf("%p", obj2.File())) bytes, err := io.ReadAll(f2) require.NoError(t, err) - require.Nil(t, f2.Close()) + require.NoError(t, f2.Close()) require.Equal(t, data, string(bytes))