diff --git a/README.md b/README.md
index 4eb957f..9ad7328 100644
--- a/README.md
+++ b/README.md
@@ -2,10 +2,10 @@
 Cloudstorage is a library for working with cloud storage (Google, AWS, Azure), SFTP, and local files. It provides a unified API for local files, SFTP, and cloud files that aids testing and operating across multiple cloud storage providers.
 
-[![Code Coverage](https://codecov.io/gh/lytics/cloudstorage/branch/master/graph/badge.svg)](https://codecov.io/gh/lytics/cloudstorage)
-[![GoDoc](https://godoc.org/github.com/lytics/cloudstorage?status.svg)](http://godoc.org/github.com/lytics/cloudstorage)
-[![Build Status](https://travis-ci.org/lytics/cloudstorage.svg?branch=master)](https://travis-ci.org/lytics/cloudstorage)
-[![Go ReportCard](https://goreportcard.com/badge/lytics/cloudstorage)](https://goreportcard.com/report/lytics/cloudstorage)
+[![Code Coverage](https://codecov.io/gh/dvriesman/cloudstorage/branch/master/graph/badge.svg)](https://codecov.io/gh/dvriesman/cloudstorage)
+[![GoDoc](https://godoc.org/github.com/dvriesman/cloudstorage?status.svg)](http://godoc.org/github.com/dvriesman/cloudstorage)
+[![Build Status](https://travis-ci.org/dvriesman/cloudstorage.svg?branch=master)](https://travis-ci.org/dvriesman/cloudstorage)
+[![Go ReportCard](https://goreportcard.com/badge/dvriesman/cloudstorage)](https://goreportcard.com/report/dvriesman/cloudstorage)
 
 **Features**
 * Provide single unified api for multiple cloud (google, azure, aws) & local files.
@@ -24,7 +24,7 @@ Note: For these examples all errors are ignored, using the `_` for them.
 ##### Creating a Store object:
 ```go
 // This is an example of a local storage object:
-// See (https://github.com/lytics/cloudstorage/blob/master/google/google_test.go) for a GCS example:
+// See (https://github.com/dvriesman/cloudstorage/blob/master/google/google_test.go) for a GCS example:
 config := &cloudstorage.Config{
 	Type:       localfs.StoreType,
 	AuthMethod: localfs.AuthFileSystem,
@@ -98,14 +98,14 @@
 resp, _ := transferer.NewTransfer(config)
 ```
 
-See [testsuite.go](https://github.com/lytics/cloudstorage/blob/master/testutils/testutils.go) for more examples
+See [testutils.go](https://github.com/dvriesman/cloudstorage/blob/master/testutils/testutils.go) for more examples
 
 ## Testing
 
 Due to the way integration tests act against a cloud bucket and objects, run tests without parallelization.
 
 ```
-cd $GOPATH/src/github.com/lytics/cloudstorage
+cd $GOPATH/src/github.com/dvriesman/cloudstorage
 go test -p 1 ./...
 ```
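Since the README's snippets are what this PR's import-path changes touch most, here is a compact end-to-end sketch of the unified API using the localfs driver. A minimal sketch, assuming the README's `/tmp/mockcloud` config; the object name is hypothetical and errors are ignored as in the README examples:

```go
package main

import (
	"github.com/dvriesman/cloudstorage"
	"github.com/dvriesman/cloudstorage/localfs" // registers the "localfs" driver
)

func main() {
	// Same shape as the README's example config.
	config := &cloudstorage.Config{
		Type:       localfs.StoreType,
		AuthMethod: localfs.AuthFileSystem,
		LocalFS:    "/tmp/mockcloud",  // where "cloud" objects are stored
		TmpDir:     "/tmp/localcache", // local working cache
	}
	store, _ := cloudstorage.NewStore(config)

	// Create an object, write to its local cached copy, then Close to sync.
	obj, _ := store.NewObject("tables/orders.csv") // hypothetical name
	f, _ := obj.Open(cloudstorage.ReadWrite)
	f.WriteString("id,amount\n1,9.99\n")
	obj.Close()
	obj.Release() // drop the local cached copy
}
```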
diff --git a/awss3/store.go b/awss3/store.go
index f405f1f..c202563 100644
--- a/awss3/store.go
+++ b/awss3/store.go
@@ -20,8 +20,8 @@ import (
 	"github.com/aws/aws-sdk-go/service/s3"
 	"github.com/aws/aws-sdk-go/service/s3/s3manager"
 
-	"github.com/lytics/cloudstorage"
-	"github.com/lytics/cloudstorage/csbufio"
+	"github.com/dvriesman/cloudstorage"
+	"github.com/dvriesman/cloudstorage/csbufio"
 )
 
 const (
@@ -705,3 +705,7 @@ func (o *object) Release() error {
 	os.Remove(o.cachepath)
 	return nil
 }
+
+func (o *object) AcquireLease(uid string) (string, error) {
+	return "", nil
+}
\ No newline at end of file
diff --git a/awss3/store_test.go b/awss3/store_test.go
index 941e20d..30da8ad 100644
--- a/awss3/store_test.go
+++ b/awss3/store_test.go
@@ -7,9 +7,9 @@ import (
 	"github.com/araddon/gou"
 	"github.com/bmizerany/assert"
 
-	"github.com/lytics/cloudstorage"
-	"github.com/lytics/cloudstorage/awss3"
-	"github.com/lytics/cloudstorage/testutils"
+	"github.com/dvriesman/cloudstorage"
+	"github.com/dvriesman/cloudstorage/awss3"
+	"github.com/dvriesman/cloudstorage/testutils"
 )
 
 /*
diff --git a/azure/README.md b/azure/README.md
index 6a4524f..2acd5a3 100644
--- a/azure/README.md
+++ b/azure/README.md
@@ -29,8 +29,8 @@ import (
 	"github.com/araddon/gou"
 	"google.golang.org/api/iterator"
 
-	"github.com/lytics/cloudstorage"
-	"github.com/lytics/cloudstorage/azure"
+	"github.com/dvriesman/cloudstorage"
+	"github.com/dvriesman/cloudstorage/azure"
 )
 
 /*
diff --git a/azure/example/main.go b/azure/example/main.go
index b4efb5c..e9a316d 100644
--- a/azure/example/main.go
+++ b/azure/example/main.go
@@ -8,8 +8,8 @@ import (
 	"github.com/araddon/gou"
 	"google.golang.org/api/iterator"
 
-	"github.com/lytics/cloudstorage"
-	"github.com/lytics/cloudstorage/azure"
+	"github.com/dvriesman/cloudstorage"
+	"github.com/dvriesman/cloudstorage/azure"
 )
 
 /*
diff --git a/azure/store.go b/azure/store.go
index 2a98a70..cce59c5 100644
--- a/azure/store.go
+++ b/azure/store.go
@@ -15,8 +15,8 @@ import (
 	"github.com/pborman/uuid"
 	"golang.org/x/net/context"
 
-	"github.com/lytics/cloudstorage"
-	"github.com/lytics/cloudstorage/csbufio"
+	"github.com/dvriesman/cloudstorage"
+	"github.com/dvriesman/cloudstorage/csbufio"
 )
 
 const (
@@ -532,6 +532,13 @@ func (o *object) SetMetaData(meta map[string]string) {
 	o.metadata = meta
 }
 
+// AcquireLease acquires an infinite-duration lease on the blob, using
+// uid as the proposed lease id.
+func (o *object) AcquireLease(uid string) (string, error) {
+	options := &az.LeaseOptions{}
+	return o.o.AcquireLease(-1, uid, options)
+}
+
 func (o *object) Delete() error {
 	return o.fs.Delete(context.Background(), o.name)
 }
diff --git a/azure/store_test.go b/azure/store_test.go
index 120a297..71e6a06 100644
--- a/azure/store_test.go
+++ b/azure/store_test.go
@@ -7,9 +7,9 @@ import (
 	"github.com/araddon/gou"
 	"github.com/stretchr/testify/assert"
 
-	"github.com/lytics/cloudstorage"
-	"github.com/lytics/cloudstorage/azure"
-	"github.com/lytics/cloudstorage/testutils"
+	"github.com/dvriesman/cloudstorage"
+	"github.com/dvriesman/cloudstorage/azure"
+	"github.com/dvriesman/cloudstorage/testutils"
 )
 
 /*
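The Azure driver above is the only store in this diff whose `AcquireLease` does real work: it requests an infinite-duration blob lease (`-1`) with `uid` as the proposed lease id. A caller-side sketch, assuming any `cloudstorage.Store` and a hypothetical object name; drivers without lease support return an empty lease id from their stubs:

```go
package example

import (
	"context"
	"log"

	"github.com/dvriesman/cloudstorage"
	"github.com/pborman/uuid"
)

// tryLease is a hypothetical helper: fetch an object and attempt to lease it.
// With the azure driver this maps to a blob lease; awss3, google, localfs,
// and backblaze return "" from their no-op stubs.
func tryLease(store cloudstorage.Store, name string) (string, error) {
	obj, err := store.Get(context.Background(), name)
	if err != nil {
		return "", err
	}
	leaseID, err := obj.AcquireLease(uuid.NewUUID().String())
	if err != nil {
		return "", err
	}
	if leaseID == "" {
		log.Printf("store %q has no lease support for %s", store.Type(), name)
	}
	return leaseID, nil
}
```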
"github.com/dvriesman/cloudstorage" +) + +const ( + + //StoreType - Storage Type + StoreType = "backblaze" + + //Account = Auth mechanism + Account = "account" + + //Key = Auth mechanism + Key = "key" + + //AuthKey = Auth Type + AuthKey cloudstorage.AuthMethod = "account_key" +) + +var ( + // Retries number of times to retry upon failures. + Retries = 3 + + // ErrNoAccount error messsage + ErrNoAccount = fmt.Errorf("no valid backblaze account informed") + + // ErrNoAccessKey error messsage + ErrNoAccessKey = fmt.Errorf("no valid backblaze key informed") + + // ErrNotImplemented error messsage + ErrNotImplemented = fmt.Errorf("this method was not implemented yet") + + // ErrNotSupported error messsage + ErrNotSupported = fmt.Errorf("this feature is not supported for backblaze") + + // ErrNoAuth error for no findable auth + ErrNoAuth = fmt.Errorf("No auth provided") + + // ErrObjectNotFound indicate object not found + ErrObjectNotFound = fmt.Errorf("not_found") +) + +func init() { + // Register this Driver (backblaze) in cloudstorage driver registry. + cloudstorage.Register(StoreType, func(conf *cloudstorage.Config) (cloudstorage.Store, error) { + client, err := NewClient(conf) + if err != nil { + return nil, err + } + return NewStore(client, conf) + }) +} + +type ( + // FS Simple wrapper for accessing backblaze blob files, it doesn't currently implement a + // Reader/Writer interface so not useful for stream reading of large files yet. + FS struct { + ID string + client *bz.B2 + bucket string + cachepath string + } + + blobRef struct { + fs *FS + name string + bucket *bz.Bucket + updated time.Time + opened bool + cachedcopy *os.File + cachepath string + rc io.ReadCloser + readonly bool + metadata map[string]string + } +) + +// NewClient create new AWS s3 Client. Uses cloudstorage.Config to read +// necessary config settings such as bucket, region, auth. +func NewClient(conf *cloudstorage.Config) (*bz.B2, error) { + + switch conf.AuthMethod { + case AuthKey: + + account := conf.Settings.String(Account) + if account == "" { + return nil, ErrNoAccount + } + key := conf.Settings.String(Key) + if account == "" { + return nil, ErrNoAccessKey + } + + b2, err := bz.NewB2(bz.Credentials{ + AccountID: account, + ApplicationKey: key, + }) + if err != nil { + gou.Warnf("could not get backblaze client %v", err) + return nil, err + } + + return b2, err + } + + return nil, ErrNoAuth +} + +// NewStore Create Backblaze client of type cloudstorage.Store +func NewStore(c *bz.B2, conf *cloudstorage.Config) (*FS, error) { + + if conf.TmpDir == "" { + return nil, fmt.Errorf("unable to create cachepath. config.tmpdir=%q", conf.TmpDir) + } + + err := os.MkdirAll(conf.TmpDir, 0775) + if err != nil { + return nil, fmt.Errorf("unable to create cachepath. config.tmpdir=%q err=%v", conf.TmpDir, err) + } + + uid := uuid.NewUUID().String() + uid = strings.Replace(uid, "-", "", -1) + + return &FS{ + client: c, + bucket: conf.Bucket, + cachepath: conf.TmpDir, + ID: uid, + }, nil +} + +// Type is he Store Type [google, s3, azure, localfs, etc] +func (f *FS) Type() string { + return StoreType +} + +// Client gets access to the underlying native Client for Backblaze +func (f *FS) Client() interface{} { + return f.client +} + +// Get returns an object (file) from the cloud store. The object +// isn't opened already, see Object.Open() +// ObjectNotFound will be returned if the object is not found. 
+// Get returns an object (file) from the cloud store. The object
+// isn't opened yet, see Object.Open().
+// ErrObjectNotFound will be returned if the object is not found.
+func (f *FS) Get(ctx context.Context, o string) (cloudstorage.Object, error) {
+
+	bucket, err := f.client.Bucket(f.bucket)
+	if err != nil {
+		return nil, err
+	}
+
+	cf := cloudstorage.CachePathObj(f.cachepath, o, f.ID)
+
+	blobRef := &blobRef{
+		fs:        f,
+		bucket:    bucket,
+		name:      o,
+		cachepath: cf,
+		metadata:  map[string]string{cloudstorage.ContentTypeKey: cloudstorage.ContentType(o)},
+	}
+
+	return blobRef, nil
+}
+
+// Objects returns an object Iterator to allow paging through objects,
+// which keeps track of page cursors. Query defines the specific set
+// of filters to apply to the request.
+func (f *FS) Objects(ctx context.Context, q cloudstorage.Query) (cloudstorage.ObjectIterator, error) {
+	return nil, ErrNotImplemented
+}
+
+// List file/objects filtered by the given query. This just wraps the
+// object-iterator, returning the full list of objects.
+func (f *FS) List(ctx context.Context, q cloudstorage.Query) (*cloudstorage.ObjectsResponse, error) {
+	return nil, ErrNotImplemented
+}
+
+// Folders creates a list of folders.
+func (f *FS) Folders(ctx context.Context, q cloudstorage.Query) ([]string, error) {
+	return nil, ErrNotImplemented
+}
+
+// NewReader creates a new Reader to read the contents of the object.
+// ErrObjectNotFound will be returned if the object is not found.
+func (f *FS) NewReader(o string) (io.ReadCloser, error) {
+	return f.NewReaderWithContext(context.Background(), o)
+}
+
+// NewReaderWithContext is NewReader with context (for cancellation, etc).
+func (f *FS) NewReaderWithContext(ctx context.Context, o string) (io.ReadCloser, error) {
+	return nil, ErrNotSupported
+}
+
+// String default descriptor.
+func (f *FS) String() string {
+	return "backblaze"
+}
+
+// NewWriter returns an io.Writer that writes to a Cloud object
+// associated with this backing Store object.
+//
+// A new object will be created unless an object with this name already exists.
+// Otherwise any previous object with the same name will be replaced.
+// The object will not be available (and any previous object will remain)
+// until Close has been called.
+func (f *FS) NewWriter(o string, metadata map[string]string) (io.WriteCloser, error) {
+	return f.NewWriterWithContext(context.Background(), o, metadata)
+}
+
+// NewWriterWithContext is NewWriter but with context.
+func (f *FS) NewWriterWithContext(ctx context.Context, o string, metadata map[string]string) (io.WriteCloser, error) {
+	return nil, ErrNotSupported
+}
+
+// NewObject creates a new empty object backed by the cloud store.
+// This new object isn't synced/created in the backing store
+// until the object is Closed/Sync'ed.
+func (f *FS) NewObject(o string) (cloudstorage.Object, error) {
+
+	bucket, err := f.client.Bucket(f.bucket)
+	if err != nil {
+		return nil, err
+	}
+
+	cf := cloudstorage.CachePathObj(f.cachepath, o, f.ID)
+
+	blobRef := &blobRef{
+		fs:        f,
+		bucket:    bucket,
+		name:      o,
+		cachepath: cf,
+		metadata:  map[string]string{cloudstorage.ContentTypeKey: cloudstorage.ContentType(o)},
+	}
+
+	return blobRef, nil
+}
+
+// Delete removes the object from the cloud store.
+func (f *FS) Delete(ctx context.Context, o string) error {
+	return ErrNotSupported
+}
+
+// Name of object/file.
+func (b *blobRef) Name() string {
+	return b.name
+}
+
+// String is the default descriptor.
+func (b *blobRef) String() string {
+	return fmt.Sprintf("backblaze://%s/", b.bucket.Name)
+}
+
+// Updated timestamp.
+func (b *blobRef) Updated() time.Time {
+	return b.updated
+}
+
+// MetaData is a map of arbitrary name/value pairs about the object.
+func (b *blobRef) MetaData() map[string]string {
+	return b.metadata
+}
+
+// SetMetaData allows you to set key/value pairs.
+func (b *blobRef) SetMetaData(meta map[string]string) {
+	b.metadata = meta
+}
+
+// StorageSource is the type of store.
+func (b *blobRef) StorageSource() string {
+	return StoreType
+}
+
+// Open copies the remote file to a local cache and opens the cached version
+// for read/writing. Calling Close/Sync will push the copy back to the
+// backing store.
+func (b *blobRef) Open(readonly cloudstorage.AccessLevel) (*os.File, error) {
+
+	if b.opened {
+		return nil, fmt.Errorf("the store object is already opened. %s", b.name)
+	}
+
+	var errs = make([]error, 0)
+	var cachedcopy *os.File
+	var err error
+	var xreadonly = readonly == cloudstorage.ReadOnly
+
+	err = os.MkdirAll(path.Dir(b.cachepath), 0775)
+	if err != nil {
+		return nil, fmt.Errorf("error occurred creating cachedcopy dir. cachepath=%s object=%s err=%v", b.cachepath, b.name, err)
+	}
+
+	err = cloudstorage.EnsureDir(b.cachepath)
+	if err != nil {
+		return nil, fmt.Errorf("error occurred creating cachedcopy's dir. cachepath=%s err=%v", b.cachepath, err)
+	}
+
+	cachedcopy, err = os.Create(b.cachepath)
+	if err != nil {
+		return nil, fmt.Errorf("error occurred creating file. local=%s err=%v", b.cachepath, err)
+	}
+
+	for try := 0; try < Retries; try++ {
+
+		if b.rc == nil {
+			_, rc, err := b.bucket.DownloadFileByName(b.name)
+			if err != nil {
+				if err.Error() == "not_found: bucket "+b.bucket.Name+" does not have file: "+b.name {
+					// New file, this is fine.
+				} else {
+					// Lets re-try.
+					errs = append(errs, fmt.Errorf("error getting object err=%v", err))
+					cloudstorage.Backoff(try)
+					continue
+				}
+			}
+			if rc != nil {
+				b.rc = rc
+			}
+		}
+
+		if b.rc != nil {
+			// We have a preexisting object, so lets download it..
+			defer b.rc.Close()
+
+			if _, err := cachedcopy.Seek(0, os.SEEK_SET); err != nil {
+				return nil, fmt.Errorf("error seeking to start of cachedcopy err=%v", err) // don't retry on local fs errors
+			}
+
+			_, err = io.Copy(cachedcopy, b.rc)
+			if err != nil {
+				errs = append(errs, fmt.Errorf("error copying bytes. err=%v", err))
+				// Recreate the cachedcopy file in case it has incomplete data.
+				if err := os.Remove(b.cachepath); err != nil {
+					return nil, fmt.Errorf("error resetting the cachedcopy err=%v", err) // don't retry on local fs errors
+				}
+				if cachedcopy, err = os.Create(b.cachepath); err != nil {
+					return nil, fmt.Errorf("error creating a new cachedcopy file. local=%s err=%v", b.cachepath, err)
+				}
+
+				cloudstorage.Backoff(try)
+				continue
+			}
+		}
+
+		if xreadonly {
+			cachedcopy.Close()
+			cachedcopy, err = os.Open(b.cachepath)
+			if err != nil {
+				name := "unknown"
+				if cachedcopy != nil {
+					name = cachedcopy.Name()
+				}
+				return nil, fmt.Errorf("error opening file. local=%s object=%s tfile=%v err=%v", b.cachepath, b.name, name, err)
+			}
+		} else {
+			if _, err := cachedcopy.Seek(0, os.SEEK_SET); err != nil {
+				return nil, fmt.Errorf("error seeking to start of cachedcopy err=%v", err) // don't retry on local fs errors
+			}
+		}
+
+		b.cachedcopy = cachedcopy
+		b.readonly = xreadonly
+		b.opened = true
+		return b.cachedcopy, nil
+	}
+
+	return nil, fmt.Errorf("fetch error retry cnt reached: obj=%s tfile=%v errs:[%v]", b.name, b.cachepath, errs)
+}
+
+// Release will remove the locally cached copy of the file. You must call Close
+// before releasing. Release will call os.Remove(local_copy_file) so opened
+// filehandles need to be closed.
+func (b *blobRef) Release() error {
+	if b.cachedcopy != nil {
+		gou.Debugf("release %q vs %q", b.cachedcopy.Name(), b.cachepath)
+		b.cachedcopy.Close()
+		return os.Remove(b.cachepath)
+	}
+	os.Remove(b.cachepath)
+	return nil
+}
+
+// Implement io.ReadWriteCloser. Open must be called before using these
+// functions.
+func (b *blobRef) Read(p []byte) (n int, err error) {
+	return b.cachedcopy.Read(p)
+}
+
+// Write to the local cached copy, opening it read/write if needed.
+func (b *blobRef) Write(p []byte) (n int, err error) {
+	if b.cachedcopy == nil {
+		_, err := b.Open(cloudstorage.ReadWrite)
+		if err != nil {
+			return 0, err
+		}
+	}
+	return b.cachedcopy.Write(p)
+}
+
+// Sync uploads the local cached copy back to the backing store.
+func (b *blobRef) Sync() error {
+	if !b.opened {
+		return fmt.Errorf("object isn't opened object:%s", b.name)
+	}
+	if b.readonly {
+		return fmt.Errorf("trying to Sync a readonly object:%s", b.name)
+	}
+
+	cachedcopy, err := os.OpenFile(b.cachepath, os.O_RDWR, 0664)
+	if err != nil {
+		return fmt.Errorf("couldn't open localfile for sync'ing. local=%s err=%v", b.cachepath, err)
+	}
+	defer cachedcopy.Close()
+
+	if _, err := cachedcopy.Seek(0, os.SEEK_SET); err != nil {
+		return fmt.Errorf("error seeking to start of cachedcopy err=%v", err) // don't retry on local filesystem errors
+	}
+
+	// Upload the file.
+	if _, err = b.bucket.UploadFile(b.name, b.metadata, cachedcopy); err != nil {
+		gou.Warnf("could not upload %v", err)
+		return fmt.Errorf("failed to upload file, %v", err)
+	}
+	return nil
+}
+
+func (b *blobRef) Close() error {
+	if !b.opened {
+		return nil
+	}
+	defer func() {
+		os.Remove(b.cachepath)
+		b.cachedcopy = nil
+		b.opened = false
+	}()
+
+	serr := b.cachedcopy.Sync()
+	cerr := b.cachedcopy.Close()
+	if serr != nil || cerr != nil {
+		return fmt.Errorf("error on sync and closing localfile. %s sync=%v, err=%v", b.cachepath, serr, cerr)
+	}
+
+	if b.opened && !b.readonly {
+		err := b.Sync()
+		if err != nil {
+			gou.Errorf("error on sync %v", err)
+			return err
+		}
+	}
+	return nil
+}
+
+// File returns the cached/local copy of the file.
+func (b *blobRef) File() *os.File {
+	return b.cachedcopy
+}
+
+// Delete removes the object from the cloud store and local cache.
+func (b *blobRef) Delete() error {
+	return ErrNotImplemented
+}
+
+// AcquireLease is a no-op; leasing is not supported for backblaze.
+func (b *blobRef) AcquireLease(uid string) (string, error) {
+	return "", nil
+}
\ No newline at end of file
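With the new driver registered in `init()`, it can be constructed through the generic factory. A minimal sketch, assuming `Settings` is a `gou.JsonHelper` as in the other drivers; the bucket name and credentials are placeholders:

```go
package main

import (
	"log"

	"github.com/araddon/gou"
	"github.com/dvriesman/cloudstorage"
	"github.com/dvriesman/cloudstorage/backblaze" // registers the "backblaze" driver
)

func main() {
	config := &cloudstorage.Config{
		Type:       backblaze.StoreType, // "backblaze"
		AuthMethod: backblaze.AuthKey,   // "account_key"
		Bucket:     "my-b2-bucket",      // hypothetical bucket
		TmpDir:     "/tmp/localcache",   // required: local cache dir
		Settings: gou.JsonHelper{
			backblaze.Account: "B2_ACCOUNT_ID",      // hypothetical credentials
			backblaze.Key:     "B2_APPLICATION_KEY",
		},
	}
	// init() registered "backblaze", so the generic constructor resolves it.
	store, err := cloudstorage.NewStore(config)
	if err != nil {
		log.Fatalf("could not create store: %v", err)
	}
	log.Println("store type:", store.Type())
}
```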
diff --git a/debug.test b/debug.test
new file mode 100755
index 0000000..9d1ae02
Binary files /dev/null and b/debug.test differ
diff --git a/google/apistore.go b/google/apistore.go
index 708cb4e..4162b6f 100644
--- a/google/apistore.go
+++ b/google/apistore.go
@@ -1,7 +1,7 @@
 package google
 
 import (
-	"github.com/lytics/cloudstorage"
+	"github.com/dvriesman/cloudstorage"
 	"google.golang.org/api/storage/v1"
 )
 
diff --git a/google/client.go b/google/client.go
index cfc25a2..23c4451 100644
--- a/google/client.go
+++ b/google/client.go
@@ -13,7 +13,7 @@ import (
 	"golang.org/x/oauth2/jwt"
 	"google.golang.org/api/option"
 
-	"github.com/lytics/cloudstorage"
+	"github.com/dvriesman/cloudstorage"
 )
 
 const (
@@ -155,7 +155,7 @@ func NewGoogleClient(conf *cloudstorage.Config) (client GoogleOAuthClient, err e
 	switch conf.AuthMethod {
 	case AuthGCEDefaultOAuthToken:
 		// This token method uses the default OAuth token with GCS created by tools like gsutils, gcloud, etc...
-		// See github.com/lytics/lio/src/ext_svcs/google/google_transporter.go : BuildDefaultGoogleTransporter
+		// See github.com/dvriesman/lio/src/ext_svcs/google/google_transporter.go : BuildDefaultGoogleTransporter
 		client, err = BuildDefaultGoogleTransporter("")
 		if err != nil {
 			return nil, err
 		}
diff --git a/google/google_test.go b/google/google_test.go
index 906bf4b..dbc0f4a 100644
--- a/google/google_test.go
+++ b/google/google_test.go
@@ -5,9 +5,9 @@ import (
 	"os"
 	"testing"
 
-	"github.com/lytics/cloudstorage"
-	"github.com/lytics/cloudstorage/google"
-	"github.com/lytics/cloudstorage/testutils"
+	"github.com/dvriesman/cloudstorage"
+	"github.com/dvriesman/cloudstorage/google"
+	"github.com/dvriesman/cloudstorage/testutils"
 )
 
 /*
diff --git a/google/store.go b/google/store.go
index 87e6a3e..67c0501 100644
--- a/google/store.go
+++ b/google/store.go
@@ -15,7 +15,7 @@ import (
 	"golang.org/x/net/context"
 	"google.golang.org/api/iterator"
 
-	"github.com/lytics/cloudstorage"
+	"github.com/dvriesman/cloudstorage"
 )
 
 func init() {
@@ -547,3 +547,7 @@ func (o *object) Release() error {
 	os.Remove(o.cachepath)
 	return nil
 }
+
+func (o *object) AcquireLease(uid string) (string, error) {
+	return "", nil
+}
\ No newline at end of file
diff --git a/google/storeutils/get.go b/google/storeutils/get.go
index 16d9f8d..1a66f9c 100644
--- a/google/storeutils/get.go
+++ b/google/storeutils/get.go
@@ -7,7 +7,7 @@ import (
 	"cloud.google.com/go/storage"
 	"golang.org/x/net/context"
 
-	"github.com/lytics/cloudstorage"
+	"github.com/dvriesman/cloudstorage"
 )
 
 // GetObject Gets a single object's bytes based on bucket and name parameters
diff --git a/google/storeutils/utils.go b/google/storeutils/utils.go
index eb803d2..86154a6 100644
--- a/google/storeutils/utils.go
+++ b/google/storeutils/utils.go
@@ -1,7 +1,7 @@
 package storeutils
 
 import (
-	"github.com/lytics/cloudstorage"
+	"github.com/dvriesman/cloudstorage"
 	"golang.org/x/net/context"
 )
 
diff --git a/google/storeutils/utils_test.go b/google/storeutils/utils_test.go
index 1d7c4eb..f4d6b37 100644
--- a/google/storeutils/utils_test.go
+++ b/google/storeutils/utils_test.go
@@ -8,8 +8,8 @@ import (
 	"golang.org/x/net/context"
 	"google.golang.org/api/option"
 
-	"github.com/lytics/cloudstorage"
-	"github.com/lytics/cloudstorage/google"
+	"github.com/dvriesman/cloudstorage"
+	"github.com/dvriesman/cloudstorage/google"
 )
 
 var testBucket = os.Getenv("TESTBUCKET")
diff --git a/localfs/store.go b/localfs/store.go
index ba476da..e3b4b09 100644
--- a/localfs/store.go
+++ b/localfs/store.go
@@ -11,8 +11,8 @@ import (
 	"strings"
 	"time"
 
-	"github.com/lytics/cloudstorage"
-	"github.com/lytics/cloudstorage/csbufio"
+	"github.com/dvriesman/cloudstorage"
+	"github.com/dvriesman/cloudstorage/csbufio"
 	"github.com/pborman/uuid"
 	"golang.org/x/net/context"
 	"google.golang.org/api/iterator"
@@ -499,3 +499,7 @@ func (o *object) Release() error {
 	os.Remove(o.cachepath)
 	return nil
 }
+
+func (o *object) AcquireLease(uid string) (string, error) {
+	return "", nil
+}
\ No newline at end of file
diff --git a/localfs/store_test.go b/localfs/store_test.go
index 6f4907b..1829019 100644
--- a/localfs/store_test.go
+++ b/localfs/store_test.go
@@ -5,9 +5,9 @@ import (
 	"github.com/stretchr/testify/assert"
 
-	"github.com/lytics/cloudstorage"
-	"github.com/lytics/cloudstorage/localfs"
-	"github.com/lytics/cloudstorage/testutils"
+	"github.com/dvriesman/cloudstorage"
+	"github.com/dvriesman/cloudstorage/localfs"
+	"github.com/dvriesman/cloudstorage/testutils"
 )
 
 func TestAll(t *testing.T) {
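The no-op `AcquireLease` stubs above exist only to keep each driver's unexported object type satisfying the expanded `cloudstorage.Object` interface. A compile-time assertion — not part of this PR, just a suggested guard if added inside, say, `localfs/store.go` — would surface a missing method at build time rather than at the call site:

```go
package localfs

import "github.com/dvriesman/cloudstorage"

// Build breaks here if *object ever stops implementing cloudstorage.Object,
// e.g. when the interface gains a new method such as AcquireLease.
var _ cloudstorage.Object = (*object)(nil)
```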
diff --git a/registry_test.go b/registry_test.go
index 5715734..c116439 100644
--- a/registry_test.go
+++ b/registry_test.go
@@ -6,7 +6,7 @@ import (
 	"github.com/stretchr/testify/assert"
 
-	"github.com/lytics/cloudstorage"
+	"github.com/dvriesman/cloudstorage"
 )
 
 func TestRegistry(t *testing.T) {
diff --git a/sftp/store.go b/sftp/store.go
index 37d22b4..3e8c2ae 100644
--- a/sftp/store.go
+++ b/sftp/store.go
@@ -15,7 +15,7 @@ import (
 	"golang.org/x/crypto/ssh"
 	"golang.org/x/net/context"
 
-	"github.com/lytics/cloudstorage"
+	"github.com/dvriesman/cloudstorage"
 )
 
 const (
diff --git a/sftp/store_test.go b/sftp/store_test.go
index 495083b..dec206b 100644
--- a/sftp/store_test.go
+++ b/sftp/store_test.go
@@ -12,9 +12,9 @@ import (
 	"github.com/araddon/gou"
 	"github.com/bmizerany/assert"
 
-	"github.com/lytics/cloudstorage"
-	"github.com/lytics/cloudstorage/sftp"
-	"github.com/lytics/cloudstorage/testutils"
+	"github.com/dvriesman/cloudstorage"
+	"github.com/dvriesman/cloudstorage/sftp"
+	"github.com/dvriesman/cloudstorage/testutils"
 )
 
 /*
diff --git a/store.go b/store.go
index e9b3c82..c014c06 100644
--- a/store.go
+++ b/store.go
@@ -143,8 +143,11 @@
 		File() *os.File
 		// Delete removes the object from the cloud store and local cache.
 		Delete() error
+		// AcquireLease takes a proposed lease id, returning the lease id
+		// granted by the store (empty for stores without lease support).
+		AcquireLease(uid string) (string, error)
 	}
 
 	// ObjectIterator interface to page through objects
 	// See go doc for examples https://github.com/GoogleCloudPlatform/google-cloud-go/wiki/Iterator-Guidelines
 	ObjectIterator interface {
diff --git a/store_test.go b/store_test.go
index a1bb0c4..c64c1a3 100644
--- a/store_test.go
+++ b/store_test.go
@@ -7,9 +7,9 @@ import (
 	"github.com/stretchr/testify/assert"
 
-	"github.com/lytics/cloudstorage"
-	"github.com/lytics/cloudstorage/localfs"
-	"github.com/lytics/cloudstorage/testutils"
+	"github.com/dvriesman/cloudstorage"
+	"github.com/dvriesman/cloudstorage/localfs"
+	"github.com/dvriesman/cloudstorage/testutils"
 )
 
 func TestAll(t *testing.T) {
diff --git a/testutils/testutils.go b/testutils/testutils.go
index 7cd9fb8..296960b 100644
--- a/testutils/testutils.go
+++ b/testutils/testutils.go
@@ -14,7 +14,7 @@ import (
 	"time"
 
 	"github.com/araddon/gou"
-	"github.com/lytics/cloudstorage"
+	"github.com/dvriesman/cloudstorage"
 	"github.com/stretchr/testify/assert"
 	"google.golang.org/api/iterator"
 )
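Because `Object` is an exported interface, adding `AcquireLease` is a breaking change for any out-of-tree store implementation. A minimal, hypothetical stub that restores compilation for such a driver, mirroring the awss3/google/localfs stubs in this diff (the other Object methods are elided):

```go
package mystore // hypothetical third-party driver

// object stands in for the driver's Object implementation; its other
// methods (Open, Read, Write, Close, Delete, ...) are elided here.
type object struct{ name string }

// AcquireLease satisfies the expanded cloudstorage.Object interface.
// Stores without native lease support return an empty lease id and a
// nil error, matching the stubs added in this diff.
func (o *object) AcquireLease(uid string) (string, error) {
	return "", nil
}
```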