diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 88c4b3ca8..4b77e6371 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.24 +ARG GO_VERSION=1.25 ARG DEBIAN_CODENAME=trixie FROM mcr.microsoft.com/devcontainers/go:${GO_VERSION}-${DEBIAN_CODENAME} diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index d3d5284f7..984c7fc50 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -3,7 +3,7 @@ "build": { "dockerfile": "Dockerfile", "args": { - "GO_VERSION": "1.24", + "GO_VERSION": "1.25", "DEBIAN_CODENAME": "trixie", "PG_MAJOR": "16", "MARIADB_MAJOR": "12.0.2" diff --git a/Dockerfile b/Dockerfile index 126411d0f..ffe6bca25 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.24.9-bookworm AS builder +FROM golang:1.25.8-bookworm AS builder WORKDIR /app COPY go.* ./ RUN go mod download diff --git a/client/swagger/http/storage/create_internetarchive_storage_parameters.go b/client/swagger/http/storage/create_internetarchive_storage_parameters.go index 52356a7d0..416180925 100644 --- a/client/swagger/http/storage/create_internetarchive_storage_parameters.go +++ b/client/swagger/http/storage/create_internetarchive_storage_parameters.go @@ -67,7 +67,7 @@ type CreateInternetarchiveStorageParams struct { Request body */ - Request *models.StorageCreateInternetarchiveStorageRequest + Request models.StorageCreateInternetarchiveStorageRequest timeout time.Duration Context context.Context @@ -123,13 +123,13 @@ func (o *CreateInternetarchiveStorageParams) SetHTTPClient(client *http.Client) } // WithRequest adds the request to the create internetarchive storage params -func (o *CreateInternetarchiveStorageParams) WithRequest(request *models.StorageCreateInternetarchiveStorageRequest) *CreateInternetarchiveStorageParams { +func (o *CreateInternetarchiveStorageParams) WithRequest(request models.StorageCreateInternetarchiveStorageRequest) 
*CreateInternetarchiveStorageParams { o.SetRequest(request) return o } // SetRequest adds the request to the create internetarchive storage params -func (o *CreateInternetarchiveStorageParams) SetRequest(request *models.StorageCreateInternetarchiveStorageRequest) { +func (o *CreateInternetarchiveStorageParams) SetRequest(request models.StorageCreateInternetarchiveStorageRequest) { o.Request = request } diff --git a/client/swagger/http/storage/create_s3_bizfly_cloud_storage_parameters.go b/client/swagger/http/storage/create_s3_bizfly_cloud_storage_parameters.go new file mode 100644 index 000000000..7f7cde00c --- /dev/null +++ b/client/swagger/http/storage/create_s3_bizfly_cloud_storage_parameters.go @@ -0,0 +1,153 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// NewCreateS3BizflyCloudStorageParams creates a new CreateS3BizflyCloudStorageParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewCreateS3BizflyCloudStorageParams() *CreateS3BizflyCloudStorageParams { + return &CreateS3BizflyCloudStorageParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewCreateS3BizflyCloudStorageParamsWithTimeout creates a new CreateS3BizflyCloudStorageParams object +// with the ability to set a timeout on a request. 
+func NewCreateS3BizflyCloudStorageParamsWithTimeout(timeout time.Duration) *CreateS3BizflyCloudStorageParams { + return &CreateS3BizflyCloudStorageParams{ + timeout: timeout, + } +} + +// NewCreateS3BizflyCloudStorageParamsWithContext creates a new CreateS3BizflyCloudStorageParams object +// with the ability to set a context for a request. +func NewCreateS3BizflyCloudStorageParamsWithContext(ctx context.Context) *CreateS3BizflyCloudStorageParams { + return &CreateS3BizflyCloudStorageParams{ + Context: ctx, + } +} + +// NewCreateS3BizflyCloudStorageParamsWithHTTPClient creates a new CreateS3BizflyCloudStorageParams object +// with the ability to set a custom HTTPClient for a request. +func NewCreateS3BizflyCloudStorageParamsWithHTTPClient(client *http.Client) *CreateS3BizflyCloudStorageParams { + return &CreateS3BizflyCloudStorageParams{ + HTTPClient: client, + } +} + +/* +CreateS3BizflyCloudStorageParams contains all the parameters to send to the API endpoint + + for the create s3 bizfly cloud storage operation. + + Typically these are written to a http.Request. +*/ +type CreateS3BizflyCloudStorageParams struct { + + /* Request. + + Request body + */ + Request *models.StorageCreateS3BizflyCloudStorageRequest + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the create s3 bizfly cloud storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateS3BizflyCloudStorageParams) WithDefaults() *CreateS3BizflyCloudStorageParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the create s3 bizfly cloud storage params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *CreateS3BizflyCloudStorageParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the create s3 bizfly cloud storage params +func (o *CreateS3BizflyCloudStorageParams) WithTimeout(timeout time.Duration) *CreateS3BizflyCloudStorageParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the create s3 bizfly cloud storage params +func (o *CreateS3BizflyCloudStorageParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the create s3 bizfly cloud storage params +func (o *CreateS3BizflyCloudStorageParams) WithContext(ctx context.Context) *CreateS3BizflyCloudStorageParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the create s3 bizfly cloud storage params +func (o *CreateS3BizflyCloudStorageParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the create s3 bizfly cloud storage params +func (o *CreateS3BizflyCloudStorageParams) WithHTTPClient(client *http.Client) *CreateS3BizflyCloudStorageParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the create s3 bizfly cloud storage params +func (o *CreateS3BizflyCloudStorageParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRequest adds the request to the create s3 bizfly cloud storage params +func (o *CreateS3BizflyCloudStorageParams) WithRequest(request *models.StorageCreateS3BizflyCloudStorageRequest) *CreateS3BizflyCloudStorageParams { + o.SetRequest(request) + return o +} + +// SetRequest adds the request to the create s3 bizfly cloud storage params +func (o *CreateS3BizflyCloudStorageParams) SetRequest(request *models.StorageCreateS3BizflyCloudStorageRequest) { + o.Request = request +} + +// WriteToRequest writes these params to a swagger request +func (o *CreateS3BizflyCloudStorageParams) WriteToRequest(r 
runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Request != nil { + if err := r.SetBodyParam(o.Request); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/client/swagger/http/storage/create_s3_bizfly_cloud_storage_responses.go b/client/swagger/http/storage/create_s3_bizfly_cloud_storage_responses.go new file mode 100644 index 000000000..664f61130 --- /dev/null +++ b/client/swagger/http/storage/create_s3_bizfly_cloud_storage_responses.go @@ -0,0 +1,259 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// CreateS3BizflyCloudStorageReader is a Reader for the CreateS3BizflyCloudStorage structure. +type CreateS3BizflyCloudStorageReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *CreateS3BizflyCloudStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewCreateS3BizflyCloudStorageOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewCreateS3BizflyCloudStorageBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewCreateS3BizflyCloudStorageInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("[POST /storage/s3/bizflycloud] CreateS3BizflyCloudStorage", response, response.Code()) + } +} + +// NewCreateS3BizflyCloudStorageOK creates a CreateS3BizflyCloudStorageOK with default headers values +func NewCreateS3BizflyCloudStorageOK() *CreateS3BizflyCloudStorageOK { + return &CreateS3BizflyCloudStorageOK{} +} + +/* +CreateS3BizflyCloudStorageOK describes a response with status code 200, with default header values. 
+ +OK +*/ +type CreateS3BizflyCloudStorageOK struct { + Payload *models.ModelStorage +} + +// IsSuccess returns true when this create s3 bizfly cloud storage o k response has a 2xx status code +func (o *CreateS3BizflyCloudStorageOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this create s3 bizfly cloud storage o k response has a 3xx status code +func (o *CreateS3BizflyCloudStorageOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 bizfly cloud storage o k response has a 4xx status code +func (o *CreateS3BizflyCloudStorageOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 bizfly cloud storage o k response has a 5xx status code +func (o *CreateS3BizflyCloudStorageOK) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 bizfly cloud storage o k response a status code equal to that given +func (o *CreateS3BizflyCloudStorageOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the create s3 bizfly cloud storage o k response +func (o *CreateS3BizflyCloudStorageOK) Code() int { + return 200 +} + +func (o *CreateS3BizflyCloudStorageOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/bizflycloud][%d] createS3BizflyCloudStorageOK %s", 200, payload) +} + +func (o *CreateS3BizflyCloudStorageOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/bizflycloud][%d] createS3BizflyCloudStorageOK %s", 200, payload) +} + +func (o *CreateS3BizflyCloudStorageOK) GetPayload() *models.ModelStorage { + return o.Payload +} + +func (o *CreateS3BizflyCloudStorageOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ModelStorage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && 
!stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3BizflyCloudStorageBadRequest creates a CreateS3BizflyCloudStorageBadRequest with default headers values +func NewCreateS3BizflyCloudStorageBadRequest() *CreateS3BizflyCloudStorageBadRequest { + return &CreateS3BizflyCloudStorageBadRequest{} +} + +/* +CreateS3BizflyCloudStorageBadRequest describes a response with status code 400, with default header values. + +Bad Request +*/ +type CreateS3BizflyCloudStorageBadRequest struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 bizfly cloud storage bad request response has a 2xx status code +func (o *CreateS3BizflyCloudStorageBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 bizfly cloud storage bad request response has a 3xx status code +func (o *CreateS3BizflyCloudStorageBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 bizfly cloud storage bad request response has a 4xx status code +func (o *CreateS3BizflyCloudStorageBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this create s3 bizfly cloud storage bad request response has a 5xx status code +func (o *CreateS3BizflyCloudStorageBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 bizfly cloud storage bad request response a status code equal to that given +func (o *CreateS3BizflyCloudStorageBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the create s3 bizfly cloud storage bad request response +func (o *CreateS3BizflyCloudStorageBadRequest) Code() int { + return 400 +} + +func (o *CreateS3BizflyCloudStorageBadRequest) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/bizflycloud][%d] createS3BizflyCloudStorageBadRequest %s", 400, payload) +} + +func (o 
*CreateS3BizflyCloudStorageBadRequest) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/bizflycloud][%d] createS3BizflyCloudStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3BizflyCloudStorageBadRequest) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3BizflyCloudStorageBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3BizflyCloudStorageInternalServerError creates a CreateS3BizflyCloudStorageInternalServerError with default headers values +func NewCreateS3BizflyCloudStorageInternalServerError() *CreateS3BizflyCloudStorageInternalServerError { + return &CreateS3BizflyCloudStorageInternalServerError{} +} + +/* +CreateS3BizflyCloudStorageInternalServerError describes a response with status code 500, with default header values. 
+ +Internal Server Error +*/ +type CreateS3BizflyCloudStorageInternalServerError struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 bizfly cloud storage internal server error response has a 2xx status code +func (o *CreateS3BizflyCloudStorageInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 bizfly cloud storage internal server error response has a 3xx status code +func (o *CreateS3BizflyCloudStorageInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 bizfly cloud storage internal server error response has a 4xx status code +func (o *CreateS3BizflyCloudStorageInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 bizfly cloud storage internal server error response has a 5xx status code +func (o *CreateS3BizflyCloudStorageInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this create s3 bizfly cloud storage internal server error response a status code equal to that given +func (o *CreateS3BizflyCloudStorageInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the create s3 bizfly cloud storage internal server error response +func (o *CreateS3BizflyCloudStorageInternalServerError) Code() int { + return 500 +} + +func (o *CreateS3BizflyCloudStorageInternalServerError) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/bizflycloud][%d] createS3BizflyCloudStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3BizflyCloudStorageInternalServerError) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/bizflycloud][%d] createS3BizflyCloudStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3BizflyCloudStorageInternalServerError) GetPayload() 
*models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3BizflyCloudStorageInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/client/swagger/http/storage/create_s3_cubbit_storage_parameters.go b/client/swagger/http/storage/create_s3_cubbit_storage_parameters.go new file mode 100644 index 000000000..b58a41089 --- /dev/null +++ b/client/swagger/http/storage/create_s3_cubbit_storage_parameters.go @@ -0,0 +1,153 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// NewCreateS3CubbitStorageParams creates a new CreateS3CubbitStorageParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewCreateS3CubbitStorageParams() *CreateS3CubbitStorageParams { + return &CreateS3CubbitStorageParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewCreateS3CubbitStorageParamsWithTimeout creates a new CreateS3CubbitStorageParams object +// with the ability to set a timeout on a request. 
+func NewCreateS3CubbitStorageParamsWithTimeout(timeout time.Duration) *CreateS3CubbitStorageParams { + return &CreateS3CubbitStorageParams{ + timeout: timeout, + } +} + +// NewCreateS3CubbitStorageParamsWithContext creates a new CreateS3CubbitStorageParams object +// with the ability to set a context for a request. +func NewCreateS3CubbitStorageParamsWithContext(ctx context.Context) *CreateS3CubbitStorageParams { + return &CreateS3CubbitStorageParams{ + Context: ctx, + } +} + +// NewCreateS3CubbitStorageParamsWithHTTPClient creates a new CreateS3CubbitStorageParams object +// with the ability to set a custom HTTPClient for a request. +func NewCreateS3CubbitStorageParamsWithHTTPClient(client *http.Client) *CreateS3CubbitStorageParams { + return &CreateS3CubbitStorageParams{ + HTTPClient: client, + } +} + +/* +CreateS3CubbitStorageParams contains all the parameters to send to the API endpoint + + for the create s3 cubbit storage operation. + + Typically these are written to a http.Request. +*/ +type CreateS3CubbitStorageParams struct { + + /* Request. + + Request body + */ + Request *models.StorageCreateS3CubbitStorageRequest + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the create s3 cubbit storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateS3CubbitStorageParams) WithDefaults() *CreateS3CubbitStorageParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the create s3 cubbit storage params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *CreateS3CubbitStorageParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the create s3 cubbit storage params +func (o *CreateS3CubbitStorageParams) WithTimeout(timeout time.Duration) *CreateS3CubbitStorageParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the create s3 cubbit storage params +func (o *CreateS3CubbitStorageParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the create s3 cubbit storage params +func (o *CreateS3CubbitStorageParams) WithContext(ctx context.Context) *CreateS3CubbitStorageParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the create s3 cubbit storage params +func (o *CreateS3CubbitStorageParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the create s3 cubbit storage params +func (o *CreateS3CubbitStorageParams) WithHTTPClient(client *http.Client) *CreateS3CubbitStorageParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the create s3 cubbit storage params +func (o *CreateS3CubbitStorageParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRequest adds the request to the create s3 cubbit storage params +func (o *CreateS3CubbitStorageParams) WithRequest(request *models.StorageCreateS3CubbitStorageRequest) *CreateS3CubbitStorageParams { + o.SetRequest(request) + return o +} + +// SetRequest adds the request to the create s3 cubbit storage params +func (o *CreateS3CubbitStorageParams) SetRequest(request *models.StorageCreateS3CubbitStorageRequest) { + o.Request = request +} + +// WriteToRequest writes these params to a swagger request +func (o *CreateS3CubbitStorageParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if 
o.Request != nil { + if err := r.SetBodyParam(o.Request); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/client/swagger/http/storage/create_s3_cubbit_storage_responses.go b/client/swagger/http/storage/create_s3_cubbit_storage_responses.go new file mode 100644 index 000000000..925dbb38b --- /dev/null +++ b/client/swagger/http/storage/create_s3_cubbit_storage_responses.go @@ -0,0 +1,259 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// CreateS3CubbitStorageReader is a Reader for the CreateS3CubbitStorage structure. +type CreateS3CubbitStorageReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *CreateS3CubbitStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewCreateS3CubbitStorageOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewCreateS3CubbitStorageBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewCreateS3CubbitStorageInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("[POST /storage/s3/cubbit] CreateS3CubbitStorage", response, response.Code()) + } +} + +// NewCreateS3CubbitStorageOK creates a CreateS3CubbitStorageOK with default headers values +func NewCreateS3CubbitStorageOK() *CreateS3CubbitStorageOK { + return &CreateS3CubbitStorageOK{} +} + +/* +CreateS3CubbitStorageOK describes a response with status code 200, with default header values. 
+ +OK +*/ +type CreateS3CubbitStorageOK struct { + Payload *models.ModelStorage +} + +// IsSuccess returns true when this create s3 cubbit storage o k response has a 2xx status code +func (o *CreateS3CubbitStorageOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this create s3 cubbit storage o k response has a 3xx status code +func (o *CreateS3CubbitStorageOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 cubbit storage o k response has a 4xx status code +func (o *CreateS3CubbitStorageOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 cubbit storage o k response has a 5xx status code +func (o *CreateS3CubbitStorageOK) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 cubbit storage o k response a status code equal to that given +func (o *CreateS3CubbitStorageOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the create s3 cubbit storage o k response +func (o *CreateS3CubbitStorageOK) Code() int { + return 200 +} + +func (o *CreateS3CubbitStorageOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/cubbit][%d] createS3CubbitStorageOK %s", 200, payload) +} + +func (o *CreateS3CubbitStorageOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/cubbit][%d] createS3CubbitStorageOK %s", 200, payload) +} + +func (o *CreateS3CubbitStorageOK) GetPayload() *models.ModelStorage { + return o.Payload +} + +func (o *CreateS3CubbitStorageOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ModelStorage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3CubbitStorageBadRequest creates a 
CreateS3CubbitStorageBadRequest with default headers values +func NewCreateS3CubbitStorageBadRequest() *CreateS3CubbitStorageBadRequest { + return &CreateS3CubbitStorageBadRequest{} +} + +/* +CreateS3CubbitStorageBadRequest describes a response with status code 400, with default header values. + +Bad Request +*/ +type CreateS3CubbitStorageBadRequest struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 cubbit storage bad request response has a 2xx status code +func (o *CreateS3CubbitStorageBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 cubbit storage bad request response has a 3xx status code +func (o *CreateS3CubbitStorageBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 cubbit storage bad request response has a 4xx status code +func (o *CreateS3CubbitStorageBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this create s3 cubbit storage bad request response has a 5xx status code +func (o *CreateS3CubbitStorageBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 cubbit storage bad request response a status code equal to that given +func (o *CreateS3CubbitStorageBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the create s3 cubbit storage bad request response +func (o *CreateS3CubbitStorageBadRequest) Code() int { + return 400 +} + +func (o *CreateS3CubbitStorageBadRequest) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/cubbit][%d] createS3CubbitStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3CubbitStorageBadRequest) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/cubbit][%d] createS3CubbitStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3CubbitStorageBadRequest) GetPayload() 
*models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3CubbitStorageBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3CubbitStorageInternalServerError creates a CreateS3CubbitStorageInternalServerError with default headers values +func NewCreateS3CubbitStorageInternalServerError() *CreateS3CubbitStorageInternalServerError { + return &CreateS3CubbitStorageInternalServerError{} +} + +/* +CreateS3CubbitStorageInternalServerError describes a response with status code 500, with default header values. + +Internal Server Error +*/ +type CreateS3CubbitStorageInternalServerError struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 cubbit storage internal server error response has a 2xx status code +func (o *CreateS3CubbitStorageInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 cubbit storage internal server error response has a 3xx status code +func (o *CreateS3CubbitStorageInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 cubbit storage internal server error response has a 4xx status code +func (o *CreateS3CubbitStorageInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 cubbit storage internal server error response has a 5xx status code +func (o *CreateS3CubbitStorageInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this create s3 cubbit storage internal server error response a status code equal to that given +func (o *CreateS3CubbitStorageInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets 
the status code for the create s3 cubbit storage internal server error response +func (o *CreateS3CubbitStorageInternalServerError) Code() int { + return 500 +} + +func (o *CreateS3CubbitStorageInternalServerError) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/cubbit][%d] createS3CubbitStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3CubbitStorageInternalServerError) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/cubbit][%d] createS3CubbitStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3CubbitStorageInternalServerError) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3CubbitStorageInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/client/swagger/http/storage/create_s3_exaba_storage_parameters.go b/client/swagger/http/storage/create_s3_exaba_storage_parameters.go new file mode 100644 index 000000000..703ea5844 --- /dev/null +++ b/client/swagger/http/storage/create_s3_exaba_storage_parameters.go @@ -0,0 +1,153 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// NewCreateS3ExabaStorageParams creates a new CreateS3ExabaStorageParams object, +// with the default timeout for this client. 
+// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewCreateS3ExabaStorageParams() *CreateS3ExabaStorageParams { + return &CreateS3ExabaStorageParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewCreateS3ExabaStorageParamsWithTimeout creates a new CreateS3ExabaStorageParams object +// with the ability to set a timeout on a request. +func NewCreateS3ExabaStorageParamsWithTimeout(timeout time.Duration) *CreateS3ExabaStorageParams { + return &CreateS3ExabaStorageParams{ + timeout: timeout, + } +} + +// NewCreateS3ExabaStorageParamsWithContext creates a new CreateS3ExabaStorageParams object +// with the ability to set a context for a request. +func NewCreateS3ExabaStorageParamsWithContext(ctx context.Context) *CreateS3ExabaStorageParams { + return &CreateS3ExabaStorageParams{ + Context: ctx, + } +} + +// NewCreateS3ExabaStorageParamsWithHTTPClient creates a new CreateS3ExabaStorageParams object +// with the ability to set a custom HTTPClient for a request. +func NewCreateS3ExabaStorageParamsWithHTTPClient(client *http.Client) *CreateS3ExabaStorageParams { + return &CreateS3ExabaStorageParams{ + HTTPClient: client, + } +} + +/* +CreateS3ExabaStorageParams contains all the parameters to send to the API endpoint + + for the create s3 exaba storage operation. + + Typically these are written to a http.Request. +*/ +type CreateS3ExabaStorageParams struct { + + /* Request. + + Request body + */ + Request *models.StorageCreateS3ExabaStorageRequest + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the create s3 exaba storage params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *CreateS3ExabaStorageParams) WithDefaults() *CreateS3ExabaStorageParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the create s3 exaba storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateS3ExabaStorageParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the create s3 exaba storage params +func (o *CreateS3ExabaStorageParams) WithTimeout(timeout time.Duration) *CreateS3ExabaStorageParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the create s3 exaba storage params +func (o *CreateS3ExabaStorageParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the create s3 exaba storage params +func (o *CreateS3ExabaStorageParams) WithContext(ctx context.Context) *CreateS3ExabaStorageParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the create s3 exaba storage params +func (o *CreateS3ExabaStorageParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the create s3 exaba storage params +func (o *CreateS3ExabaStorageParams) WithHTTPClient(client *http.Client) *CreateS3ExabaStorageParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the create s3 exaba storage params +func (o *CreateS3ExabaStorageParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRequest adds the request to the create s3 exaba storage params +func (o *CreateS3ExabaStorageParams) WithRequest(request *models.StorageCreateS3ExabaStorageRequest) *CreateS3ExabaStorageParams { + o.SetRequest(request) + return o +} + +// SetRequest adds the request to the create s3 exaba storage params +func (o *CreateS3ExabaStorageParams) SetRequest(request *models.StorageCreateS3ExabaStorageRequest) { + o.Request = request +} + +// 
WriteToRequest writes these params to a swagger request +func (o *CreateS3ExabaStorageParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Request != nil { + if err := r.SetBodyParam(o.Request); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/client/swagger/http/storage/create_s3_exaba_storage_responses.go b/client/swagger/http/storage/create_s3_exaba_storage_responses.go new file mode 100644 index 000000000..63e8a9634 --- /dev/null +++ b/client/swagger/http/storage/create_s3_exaba_storage_responses.go @@ -0,0 +1,259 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// CreateS3ExabaStorageReader is a Reader for the CreateS3ExabaStorage structure. +type CreateS3ExabaStorageReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *CreateS3ExabaStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewCreateS3ExabaStorageOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewCreateS3ExabaStorageBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewCreateS3ExabaStorageInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("[POST /storage/s3/exaba] CreateS3ExabaStorage", response, response.Code()) + } +} + +// NewCreateS3ExabaStorageOK creates a CreateS3ExabaStorageOK with default headers values +func NewCreateS3ExabaStorageOK() *CreateS3ExabaStorageOK { + return &CreateS3ExabaStorageOK{} +} + +/* +CreateS3ExabaStorageOK describes a response with status code 200, with default header values. 
+ +OK +*/ +type CreateS3ExabaStorageOK struct { + Payload *models.ModelStorage +} + +// IsSuccess returns true when this create s3 exaba storage o k response has a 2xx status code +func (o *CreateS3ExabaStorageOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this create s3 exaba storage o k response has a 3xx status code +func (o *CreateS3ExabaStorageOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 exaba storage o k response has a 4xx status code +func (o *CreateS3ExabaStorageOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 exaba storage o k response has a 5xx status code +func (o *CreateS3ExabaStorageOK) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 exaba storage o k response a status code equal to that given +func (o *CreateS3ExabaStorageOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the create s3 exaba storage o k response +func (o *CreateS3ExabaStorageOK) Code() int { + return 200 +} + +func (o *CreateS3ExabaStorageOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/exaba][%d] createS3ExabaStorageOK %s", 200, payload) +} + +func (o *CreateS3ExabaStorageOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/exaba][%d] createS3ExabaStorageOK %s", 200, payload) +} + +func (o *CreateS3ExabaStorageOK) GetPayload() *models.ModelStorage { + return o.Payload +} + +func (o *CreateS3ExabaStorageOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ModelStorage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3ExabaStorageBadRequest creates a 
CreateS3ExabaStorageBadRequest with default headers values +func NewCreateS3ExabaStorageBadRequest() *CreateS3ExabaStorageBadRequest { + return &CreateS3ExabaStorageBadRequest{} +} + +/* +CreateS3ExabaStorageBadRequest describes a response with status code 400, with default header values. + +Bad Request +*/ +type CreateS3ExabaStorageBadRequest struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 exaba storage bad request response has a 2xx status code +func (o *CreateS3ExabaStorageBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 exaba storage bad request response has a 3xx status code +func (o *CreateS3ExabaStorageBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 exaba storage bad request response has a 4xx status code +func (o *CreateS3ExabaStorageBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this create s3 exaba storage bad request response has a 5xx status code +func (o *CreateS3ExabaStorageBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 exaba storage bad request response a status code equal to that given +func (o *CreateS3ExabaStorageBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the create s3 exaba storage bad request response +func (o *CreateS3ExabaStorageBadRequest) Code() int { + return 400 +} + +func (o *CreateS3ExabaStorageBadRequest) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/exaba][%d] createS3ExabaStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3ExabaStorageBadRequest) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/exaba][%d] createS3ExabaStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3ExabaStorageBadRequest) GetPayload() *models.APIHTTPError { + 
return o.Payload +} + +func (o *CreateS3ExabaStorageBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3ExabaStorageInternalServerError creates a CreateS3ExabaStorageInternalServerError with default headers values +func NewCreateS3ExabaStorageInternalServerError() *CreateS3ExabaStorageInternalServerError { + return &CreateS3ExabaStorageInternalServerError{} +} + +/* +CreateS3ExabaStorageInternalServerError describes a response with status code 500, with default header values. + +Internal Server Error +*/ +type CreateS3ExabaStorageInternalServerError struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 exaba storage internal server error response has a 2xx status code +func (o *CreateS3ExabaStorageInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 exaba storage internal server error response has a 3xx status code +func (o *CreateS3ExabaStorageInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 exaba storage internal server error response has a 4xx status code +func (o *CreateS3ExabaStorageInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 exaba storage internal server error response has a 5xx status code +func (o *CreateS3ExabaStorageInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this create s3 exaba storage internal server error response a status code equal to that given +func (o *CreateS3ExabaStorageInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the create s3 exaba 
storage internal server error response +func (o *CreateS3ExabaStorageInternalServerError) Code() int { + return 500 +} + +func (o *CreateS3ExabaStorageInternalServerError) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/exaba][%d] createS3ExabaStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3ExabaStorageInternalServerError) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/exaba][%d] createS3ExabaStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3ExabaStorageInternalServerError) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3ExabaStorageInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/client/swagger/http/storage/create_s3_file_lu_storage_parameters.go b/client/swagger/http/storage/create_s3_file_lu_storage_parameters.go new file mode 100644 index 000000000..c7ba39f68 --- /dev/null +++ b/client/swagger/http/storage/create_s3_file_lu_storage_parameters.go @@ -0,0 +1,153 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// NewCreateS3FileLuStorageParams creates a new CreateS3FileLuStorageParams object, +// with the default timeout for this client. 
+// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewCreateS3FileLuStorageParams() *CreateS3FileLuStorageParams { + return &CreateS3FileLuStorageParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewCreateS3FileLuStorageParamsWithTimeout creates a new CreateS3FileLuStorageParams object +// with the ability to set a timeout on a request. +func NewCreateS3FileLuStorageParamsWithTimeout(timeout time.Duration) *CreateS3FileLuStorageParams { + return &CreateS3FileLuStorageParams{ + timeout: timeout, + } +} + +// NewCreateS3FileLuStorageParamsWithContext creates a new CreateS3FileLuStorageParams object +// with the ability to set a context for a request. +func NewCreateS3FileLuStorageParamsWithContext(ctx context.Context) *CreateS3FileLuStorageParams { + return &CreateS3FileLuStorageParams{ + Context: ctx, + } +} + +// NewCreateS3FileLuStorageParamsWithHTTPClient creates a new CreateS3FileLuStorageParams object +// with the ability to set a custom HTTPClient for a request. +func NewCreateS3FileLuStorageParamsWithHTTPClient(client *http.Client) *CreateS3FileLuStorageParams { + return &CreateS3FileLuStorageParams{ + HTTPClient: client, + } +} + +/* +CreateS3FileLuStorageParams contains all the parameters to send to the API endpoint + + for the create s3 file lu storage operation. + + Typically these are written to a http.Request. +*/ +type CreateS3FileLuStorageParams struct { + + /* Request. + + Request body + */ + Request *models.StorageCreateS3FileLuStorageRequest + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the create s3 file lu storage params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *CreateS3FileLuStorageParams) WithDefaults() *CreateS3FileLuStorageParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the create s3 file lu storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateS3FileLuStorageParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the create s3 file lu storage params +func (o *CreateS3FileLuStorageParams) WithTimeout(timeout time.Duration) *CreateS3FileLuStorageParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the create s3 file lu storage params +func (o *CreateS3FileLuStorageParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the create s3 file lu storage params +func (o *CreateS3FileLuStorageParams) WithContext(ctx context.Context) *CreateS3FileLuStorageParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the create s3 file lu storage params +func (o *CreateS3FileLuStorageParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the create s3 file lu storage params +func (o *CreateS3FileLuStorageParams) WithHTTPClient(client *http.Client) *CreateS3FileLuStorageParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the create s3 file lu storage params +func (o *CreateS3FileLuStorageParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRequest adds the request to the create s3 file lu storage params +func (o *CreateS3FileLuStorageParams) WithRequest(request *models.StorageCreateS3FileLuStorageRequest) *CreateS3FileLuStorageParams { + o.SetRequest(request) + return o +} + +// SetRequest adds the request to the create s3 file lu storage params +func (o *CreateS3FileLuStorageParams) SetRequest(request 
*models.StorageCreateS3FileLuStorageRequest) { + o.Request = request +} + +// WriteToRequest writes these params to a swagger request +func (o *CreateS3FileLuStorageParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Request != nil { + if err := r.SetBodyParam(o.Request); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/client/swagger/http/storage/create_s3_file_lu_storage_responses.go b/client/swagger/http/storage/create_s3_file_lu_storage_responses.go new file mode 100644 index 000000000..0451963d1 --- /dev/null +++ b/client/swagger/http/storage/create_s3_file_lu_storage_responses.go @@ -0,0 +1,259 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// CreateS3FileLuStorageReader is a Reader for the CreateS3FileLuStorage structure. +type CreateS3FileLuStorageReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *CreateS3FileLuStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewCreateS3FileLuStorageOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewCreateS3FileLuStorageBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewCreateS3FileLuStorageInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("[POST /storage/s3/filelu] CreateS3FileLuStorage", response, response.Code()) + } +} + +// NewCreateS3FileLuStorageOK creates a CreateS3FileLuStorageOK with default headers values +func NewCreateS3FileLuStorageOK() *CreateS3FileLuStorageOK { + return &CreateS3FileLuStorageOK{} +} + +/* +CreateS3FileLuStorageOK describes a response with status code 200, with default header values. 
+ +OK +*/ +type CreateS3FileLuStorageOK struct { + Payload *models.ModelStorage +} + +// IsSuccess returns true when this create s3 file lu storage o k response has a 2xx status code +func (o *CreateS3FileLuStorageOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this create s3 file lu storage o k response has a 3xx status code +func (o *CreateS3FileLuStorageOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 file lu storage o k response has a 4xx status code +func (o *CreateS3FileLuStorageOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 file lu storage o k response has a 5xx status code +func (o *CreateS3FileLuStorageOK) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 file lu storage o k response a status code equal to that given +func (o *CreateS3FileLuStorageOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the create s3 file lu storage o k response +func (o *CreateS3FileLuStorageOK) Code() int { + return 200 +} + +func (o *CreateS3FileLuStorageOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/filelu][%d] createS3FileLuStorageOK %s", 200, payload) +} + +func (o *CreateS3FileLuStorageOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/filelu][%d] createS3FileLuStorageOK %s", 200, payload) +} + +func (o *CreateS3FileLuStorageOK) GetPayload() *models.ModelStorage { + return o.Payload +} + +func (o *CreateS3FileLuStorageOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ModelStorage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3FileLuStorageBadRequest creates a 
CreateS3FileLuStorageBadRequest with default headers values +func NewCreateS3FileLuStorageBadRequest() *CreateS3FileLuStorageBadRequest { + return &CreateS3FileLuStorageBadRequest{} +} + +/* +CreateS3FileLuStorageBadRequest describes a response with status code 400, with default header values. + +Bad Request +*/ +type CreateS3FileLuStorageBadRequest struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 file lu storage bad request response has a 2xx status code +func (o *CreateS3FileLuStorageBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 file lu storage bad request response has a 3xx status code +func (o *CreateS3FileLuStorageBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 file lu storage bad request response has a 4xx status code +func (o *CreateS3FileLuStorageBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this create s3 file lu storage bad request response has a 5xx status code +func (o *CreateS3FileLuStorageBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 file lu storage bad request response a status code equal to that given +func (o *CreateS3FileLuStorageBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the create s3 file lu storage bad request response +func (o *CreateS3FileLuStorageBadRequest) Code() int { + return 400 +} + +func (o *CreateS3FileLuStorageBadRequest) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/filelu][%d] createS3FileLuStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3FileLuStorageBadRequest) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/filelu][%d] createS3FileLuStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3FileLuStorageBadRequest) 
GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3FileLuStorageBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3FileLuStorageInternalServerError creates a CreateS3FileLuStorageInternalServerError with default headers values +func NewCreateS3FileLuStorageInternalServerError() *CreateS3FileLuStorageInternalServerError { + return &CreateS3FileLuStorageInternalServerError{} +} + +/* +CreateS3FileLuStorageInternalServerError describes a response with status code 500, with default header values. + +Internal Server Error +*/ +type CreateS3FileLuStorageInternalServerError struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 file lu storage internal server error response has a 2xx status code +func (o *CreateS3FileLuStorageInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 file lu storage internal server error response has a 3xx status code +func (o *CreateS3FileLuStorageInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 file lu storage internal server error response has a 4xx status code +func (o *CreateS3FileLuStorageInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 file lu storage internal server error response has a 5xx status code +func (o *CreateS3FileLuStorageInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this create s3 file lu storage internal server error response a status code equal to that given +func (o *CreateS3FileLuStorageInternalServerError) IsCode(code int) bool { + return code == 500 
+} + +// Code gets the status code for the create s3 file lu storage internal server error response +func (o *CreateS3FileLuStorageInternalServerError) Code() int { + return 500 +} + +func (o *CreateS3FileLuStorageInternalServerError) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/filelu][%d] createS3FileLuStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3FileLuStorageInternalServerError) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/filelu][%d] createS3FileLuStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3FileLuStorageInternalServerError) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3FileLuStorageInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/client/swagger/http/storage/create_s3_flash_blade_storage_parameters.go b/client/swagger/http/storage/create_s3_flash_blade_storage_parameters.go new file mode 100644 index 000000000..a5a1fc1be --- /dev/null +++ b/client/swagger/http/storage/create_s3_flash_blade_storage_parameters.go @@ -0,0 +1,153 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// NewCreateS3FlashBladeStorageParams creates a new CreateS3FlashBladeStorageParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewCreateS3FlashBladeStorageParams() *CreateS3FlashBladeStorageParams { + return &CreateS3FlashBladeStorageParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewCreateS3FlashBladeStorageParamsWithTimeout creates a new CreateS3FlashBladeStorageParams object +// with the ability to set a timeout on a request. +func NewCreateS3FlashBladeStorageParamsWithTimeout(timeout time.Duration) *CreateS3FlashBladeStorageParams { + return &CreateS3FlashBladeStorageParams{ + timeout: timeout, + } +} + +// NewCreateS3FlashBladeStorageParamsWithContext creates a new CreateS3FlashBladeStorageParams object +// with the ability to set a context for a request. +func NewCreateS3FlashBladeStorageParamsWithContext(ctx context.Context) *CreateS3FlashBladeStorageParams { + return &CreateS3FlashBladeStorageParams{ + Context: ctx, + } +} + +// NewCreateS3FlashBladeStorageParamsWithHTTPClient creates a new CreateS3FlashBladeStorageParams object +// with the ability to set a custom HTTPClient for a request. 
+func NewCreateS3FlashBladeStorageParamsWithHTTPClient(client *http.Client) *CreateS3FlashBladeStorageParams { + return &CreateS3FlashBladeStorageParams{ + HTTPClient: client, + } +} + +/* +CreateS3FlashBladeStorageParams contains all the parameters to send to the API endpoint + + for the create s3 flash blade storage operation. + + Typically these are written to a http.Request. +*/ +type CreateS3FlashBladeStorageParams struct { + + /* Request. + + Request body + */ + Request *models.StorageCreateS3FlashBladeStorageRequest + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the create s3 flash blade storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateS3FlashBladeStorageParams) WithDefaults() *CreateS3FlashBladeStorageParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the create s3 flash blade storage params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *CreateS3FlashBladeStorageParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the create s3 flash blade storage params +func (o *CreateS3FlashBladeStorageParams) WithTimeout(timeout time.Duration) *CreateS3FlashBladeStorageParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the create s3 flash blade storage params +func (o *CreateS3FlashBladeStorageParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the create s3 flash blade storage params +func (o *CreateS3FlashBladeStorageParams) WithContext(ctx context.Context) *CreateS3FlashBladeStorageParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the create s3 flash blade storage params +func (o *CreateS3FlashBladeStorageParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the create s3 flash blade storage params +func (o *CreateS3FlashBladeStorageParams) WithHTTPClient(client *http.Client) *CreateS3FlashBladeStorageParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the create s3 flash blade storage params +func (o *CreateS3FlashBladeStorageParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRequest adds the request to the create s3 flash blade storage params +func (o *CreateS3FlashBladeStorageParams) WithRequest(request *models.StorageCreateS3FlashBladeStorageRequest) *CreateS3FlashBladeStorageParams { + o.SetRequest(request) + return o +} + +// SetRequest adds the request to the create s3 flash blade storage params +func (o *CreateS3FlashBladeStorageParams) SetRequest(request *models.StorageCreateS3FlashBladeStorageRequest) { + o.Request = request +} + +// WriteToRequest writes these params to a swagger request +func (o *CreateS3FlashBladeStorageParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) 
error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Request != nil { + if err := r.SetBodyParam(o.Request); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/client/swagger/http/storage/create_s3_flash_blade_storage_responses.go b/client/swagger/http/storage/create_s3_flash_blade_storage_responses.go new file mode 100644 index 000000000..faa5fd228 --- /dev/null +++ b/client/swagger/http/storage/create_s3_flash_blade_storage_responses.go @@ -0,0 +1,259 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// CreateS3FlashBladeStorageReader is a Reader for the CreateS3FlashBladeStorage structure. +type CreateS3FlashBladeStorageReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *CreateS3FlashBladeStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewCreateS3FlashBladeStorageOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewCreateS3FlashBladeStorageBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewCreateS3FlashBladeStorageInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("[POST /storage/s3/flashblade] CreateS3FlashBladeStorage", response, response.Code()) + } +} + +// NewCreateS3FlashBladeStorageOK creates a CreateS3FlashBladeStorageOK with default headers values +func NewCreateS3FlashBladeStorageOK() *CreateS3FlashBladeStorageOK { + return &CreateS3FlashBladeStorageOK{} +} + +/* +CreateS3FlashBladeStorageOK describes a response with status code 200, with default header values. 
+ +OK +*/ +type CreateS3FlashBladeStorageOK struct { + Payload *models.ModelStorage +} + +// IsSuccess returns true when this create s3 flash blade storage o k response has a 2xx status code +func (o *CreateS3FlashBladeStorageOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this create s3 flash blade storage o k response has a 3xx status code +func (o *CreateS3FlashBladeStorageOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 flash blade storage o k response has a 4xx status code +func (o *CreateS3FlashBladeStorageOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 flash blade storage o k response has a 5xx status code +func (o *CreateS3FlashBladeStorageOK) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 flash blade storage o k response a status code equal to that given +func (o *CreateS3FlashBladeStorageOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the create s3 flash blade storage o k response +func (o *CreateS3FlashBladeStorageOK) Code() int { + return 200 +} + +func (o *CreateS3FlashBladeStorageOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/flashblade][%d] createS3FlashBladeStorageOK %s", 200, payload) +} + +func (o *CreateS3FlashBladeStorageOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/flashblade][%d] createS3FlashBladeStorageOK %s", 200, payload) +} + +func (o *CreateS3FlashBladeStorageOK) GetPayload() *models.ModelStorage { + return o.Payload +} + +func (o *CreateS3FlashBladeStorageOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ModelStorage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + 
return err + } + + return nil +} + +// NewCreateS3FlashBladeStorageBadRequest creates a CreateS3FlashBladeStorageBadRequest with default headers values +func NewCreateS3FlashBladeStorageBadRequest() *CreateS3FlashBladeStorageBadRequest { + return &CreateS3FlashBladeStorageBadRequest{} +} + +/* +CreateS3FlashBladeStorageBadRequest describes a response with status code 400, with default header values. + +Bad Request +*/ +type CreateS3FlashBladeStorageBadRequest struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 flash blade storage bad request response has a 2xx status code +func (o *CreateS3FlashBladeStorageBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 flash blade storage bad request response has a 3xx status code +func (o *CreateS3FlashBladeStorageBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 flash blade storage bad request response has a 4xx status code +func (o *CreateS3FlashBladeStorageBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this create s3 flash blade storage bad request response has a 5xx status code +func (o *CreateS3FlashBladeStorageBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 flash blade storage bad request response a status code equal to that given +func (o *CreateS3FlashBladeStorageBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the create s3 flash blade storage bad request response +func (o *CreateS3FlashBladeStorageBadRequest) Code() int { + return 400 +} + +func (o *CreateS3FlashBladeStorageBadRequest) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/flashblade][%d] createS3FlashBladeStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3FlashBladeStorageBadRequest) String() string { + payload, _ := 
json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/flashblade][%d] createS3FlashBladeStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3FlashBladeStorageBadRequest) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3FlashBladeStorageBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3FlashBladeStorageInternalServerError creates a CreateS3FlashBladeStorageInternalServerError with default headers values +func NewCreateS3FlashBladeStorageInternalServerError() *CreateS3FlashBladeStorageInternalServerError { + return &CreateS3FlashBladeStorageInternalServerError{} +} + +/* +CreateS3FlashBladeStorageInternalServerError describes a response with status code 500, with default header values. 
+ +Internal Server Error +*/ +type CreateS3FlashBladeStorageInternalServerError struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 flash blade storage internal server error response has a 2xx status code +func (o *CreateS3FlashBladeStorageInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 flash blade storage internal server error response has a 3xx status code +func (o *CreateS3FlashBladeStorageInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 flash blade storage internal server error response has a 4xx status code +func (o *CreateS3FlashBladeStorageInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 flash blade storage internal server error response has a 5xx status code +func (o *CreateS3FlashBladeStorageInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this create s3 flash blade storage internal server error response a status code equal to that given +func (o *CreateS3FlashBladeStorageInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the create s3 flash blade storage internal server error response +func (o *CreateS3FlashBladeStorageInternalServerError) Code() int { + return 500 +} + +func (o *CreateS3FlashBladeStorageInternalServerError) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/flashblade][%d] createS3FlashBladeStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3FlashBladeStorageInternalServerError) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/flashblade][%d] createS3FlashBladeStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3FlashBladeStorageInternalServerError) GetPayload() *models.APIHTTPError { + 
return o.Payload +} + +func (o *CreateS3FlashBladeStorageInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/client/swagger/http/storage/create_s3_hetzner_storage_parameters.go b/client/swagger/http/storage/create_s3_hetzner_storage_parameters.go new file mode 100644 index 000000000..c4fa6c77b --- /dev/null +++ b/client/swagger/http/storage/create_s3_hetzner_storage_parameters.go @@ -0,0 +1,153 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// NewCreateS3HetznerStorageParams creates a new CreateS3HetznerStorageParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewCreateS3HetznerStorageParams() *CreateS3HetznerStorageParams { + return &CreateS3HetznerStorageParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewCreateS3HetznerStorageParamsWithTimeout creates a new CreateS3HetznerStorageParams object +// with the ability to set a timeout on a request. 
+func NewCreateS3HetznerStorageParamsWithTimeout(timeout time.Duration) *CreateS3HetznerStorageParams { + return &CreateS3HetznerStorageParams{ + timeout: timeout, + } +} + +// NewCreateS3HetznerStorageParamsWithContext creates a new CreateS3HetznerStorageParams object +// with the ability to set a context for a request. +func NewCreateS3HetznerStorageParamsWithContext(ctx context.Context) *CreateS3HetznerStorageParams { + return &CreateS3HetznerStorageParams{ + Context: ctx, + } +} + +// NewCreateS3HetznerStorageParamsWithHTTPClient creates a new CreateS3HetznerStorageParams object +// with the ability to set a custom HTTPClient for a request. +func NewCreateS3HetznerStorageParamsWithHTTPClient(client *http.Client) *CreateS3HetznerStorageParams { + return &CreateS3HetznerStorageParams{ + HTTPClient: client, + } +} + +/* +CreateS3HetznerStorageParams contains all the parameters to send to the API endpoint + + for the create s3 hetzner storage operation. + + Typically these are written to a http.Request. +*/ +type CreateS3HetznerStorageParams struct { + + /* Request. + + Request body + */ + Request *models.StorageCreateS3HetznerStorageRequest + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the create s3 hetzner storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateS3HetznerStorageParams) WithDefaults() *CreateS3HetznerStorageParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the create s3 hetzner storage params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *CreateS3HetznerStorageParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the create s3 hetzner storage params +func (o *CreateS3HetznerStorageParams) WithTimeout(timeout time.Duration) *CreateS3HetznerStorageParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the create s3 hetzner storage params +func (o *CreateS3HetznerStorageParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the create s3 hetzner storage params +func (o *CreateS3HetznerStorageParams) WithContext(ctx context.Context) *CreateS3HetznerStorageParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the create s3 hetzner storage params +func (o *CreateS3HetznerStorageParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the create s3 hetzner storage params +func (o *CreateS3HetznerStorageParams) WithHTTPClient(client *http.Client) *CreateS3HetznerStorageParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the create s3 hetzner storage params +func (o *CreateS3HetznerStorageParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRequest adds the request to the create s3 hetzner storage params +func (o *CreateS3HetznerStorageParams) WithRequest(request *models.StorageCreateS3HetznerStorageRequest) *CreateS3HetznerStorageParams { + o.SetRequest(request) + return o +} + +// SetRequest adds the request to the create s3 hetzner storage params +func (o *CreateS3HetznerStorageParams) SetRequest(request *models.StorageCreateS3HetznerStorageRequest) { + o.Request = request +} + +// WriteToRequest writes these params to a swagger request +func (o *CreateS3HetznerStorageParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + 
var res []error + if o.Request != nil { + if err := r.SetBodyParam(o.Request); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/client/swagger/http/storage/create_s3_hetzner_storage_responses.go b/client/swagger/http/storage/create_s3_hetzner_storage_responses.go new file mode 100644 index 000000000..6538f3047 --- /dev/null +++ b/client/swagger/http/storage/create_s3_hetzner_storage_responses.go @@ -0,0 +1,259 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// CreateS3HetznerStorageReader is a Reader for the CreateS3HetznerStorage structure. +type CreateS3HetznerStorageReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *CreateS3HetznerStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewCreateS3HetznerStorageOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewCreateS3HetznerStorageBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewCreateS3HetznerStorageInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("[POST /storage/s3/hetzner] CreateS3HetznerStorage", response, response.Code()) + } +} + +// NewCreateS3HetznerStorageOK creates a CreateS3HetznerStorageOK with default headers values +func NewCreateS3HetznerStorageOK() *CreateS3HetznerStorageOK { + return &CreateS3HetznerStorageOK{} +} + +/* +CreateS3HetznerStorageOK describes a response with status code 200, with default header values. 
+ +OK +*/ +type CreateS3HetznerStorageOK struct { + Payload *models.ModelStorage +} + +// IsSuccess returns true when this create s3 hetzner storage o k response has a 2xx status code +func (o *CreateS3HetznerStorageOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this create s3 hetzner storage o k response has a 3xx status code +func (o *CreateS3HetznerStorageOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 hetzner storage o k response has a 4xx status code +func (o *CreateS3HetznerStorageOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 hetzner storage o k response has a 5xx status code +func (o *CreateS3HetznerStorageOK) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 hetzner storage o k response a status code equal to that given +func (o *CreateS3HetznerStorageOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the create s3 hetzner storage o k response +func (o *CreateS3HetznerStorageOK) Code() int { + return 200 +} + +func (o *CreateS3HetznerStorageOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/hetzner][%d] createS3HetznerStorageOK %s", 200, payload) +} + +func (o *CreateS3HetznerStorageOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/hetzner][%d] createS3HetznerStorageOK %s", 200, payload) +} + +func (o *CreateS3HetznerStorageOK) GetPayload() *models.ModelStorage { + return o.Payload +} + +func (o *CreateS3HetznerStorageOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ModelStorage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// 
NewCreateS3HetznerStorageBadRequest creates a CreateS3HetznerStorageBadRequest with default headers values +func NewCreateS3HetznerStorageBadRequest() *CreateS3HetznerStorageBadRequest { + return &CreateS3HetznerStorageBadRequest{} +} + +/* +CreateS3HetznerStorageBadRequest describes a response with status code 400, with default header values. + +Bad Request +*/ +type CreateS3HetznerStorageBadRequest struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 hetzner storage bad request response has a 2xx status code +func (o *CreateS3HetznerStorageBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 hetzner storage bad request response has a 3xx status code +func (o *CreateS3HetznerStorageBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 hetzner storage bad request response has a 4xx status code +func (o *CreateS3HetznerStorageBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this create s3 hetzner storage bad request response has a 5xx status code +func (o *CreateS3HetznerStorageBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 hetzner storage bad request response a status code equal to that given +func (o *CreateS3HetznerStorageBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the create s3 hetzner storage bad request response +func (o *CreateS3HetznerStorageBadRequest) Code() int { + return 400 +} + +func (o *CreateS3HetznerStorageBadRequest) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/hetzner][%d] createS3HetznerStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3HetznerStorageBadRequest) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/hetzner][%d] createS3HetznerStorageBadRequest %s", 400, 
payload) +} + +func (o *CreateS3HetznerStorageBadRequest) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3HetznerStorageBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3HetznerStorageInternalServerError creates a CreateS3HetznerStorageInternalServerError with default headers values +func NewCreateS3HetznerStorageInternalServerError() *CreateS3HetznerStorageInternalServerError { + return &CreateS3HetznerStorageInternalServerError{} +} + +/* +CreateS3HetznerStorageInternalServerError describes a response with status code 500, with default header values. + +Internal Server Error +*/ +type CreateS3HetznerStorageInternalServerError struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 hetzner storage internal server error response has a 2xx status code +func (o *CreateS3HetznerStorageInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 hetzner storage internal server error response has a 3xx status code +func (o *CreateS3HetznerStorageInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 hetzner storage internal server error response has a 4xx status code +func (o *CreateS3HetznerStorageInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 hetzner storage internal server error response has a 5xx status code +func (o *CreateS3HetznerStorageInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this create s3 hetzner storage internal server error response a status code equal to that given +func (o 
*CreateS3HetznerStorageInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the create s3 hetzner storage internal server error response +func (o *CreateS3HetznerStorageInternalServerError) Code() int { + return 500 +} + +func (o *CreateS3HetznerStorageInternalServerError) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/hetzner][%d] createS3HetznerStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3HetznerStorageInternalServerError) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/hetzner][%d] createS3HetznerStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3HetznerStorageInternalServerError) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3HetznerStorageInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/client/swagger/http/storage/create_s3_intercolo_storage_parameters.go b/client/swagger/http/storage/create_s3_intercolo_storage_parameters.go new file mode 100644 index 000000000..4a4086b9d --- /dev/null +++ b/client/swagger/http/storage/create_s3_intercolo_storage_parameters.go @@ -0,0 +1,153 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// NewCreateS3IntercoloStorageParams creates a new CreateS3IntercoloStorageParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewCreateS3IntercoloStorageParams() *CreateS3IntercoloStorageParams { + return &CreateS3IntercoloStorageParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewCreateS3IntercoloStorageParamsWithTimeout creates a new CreateS3IntercoloStorageParams object +// with the ability to set a timeout on a request. +func NewCreateS3IntercoloStorageParamsWithTimeout(timeout time.Duration) *CreateS3IntercoloStorageParams { + return &CreateS3IntercoloStorageParams{ + timeout: timeout, + } +} + +// NewCreateS3IntercoloStorageParamsWithContext creates a new CreateS3IntercoloStorageParams object +// with the ability to set a context for a request. +func NewCreateS3IntercoloStorageParamsWithContext(ctx context.Context) *CreateS3IntercoloStorageParams { + return &CreateS3IntercoloStorageParams{ + Context: ctx, + } +} + +// NewCreateS3IntercoloStorageParamsWithHTTPClient creates a new CreateS3IntercoloStorageParams object +// with the ability to set a custom HTTPClient for a request. +func NewCreateS3IntercoloStorageParamsWithHTTPClient(client *http.Client) *CreateS3IntercoloStorageParams { + return &CreateS3IntercoloStorageParams{ + HTTPClient: client, + } +} + +/* +CreateS3IntercoloStorageParams contains all the parameters to send to the API endpoint + + for the create s3 intercolo storage operation. 
+ + Typically these are written to a http.Request. +*/ +type CreateS3IntercoloStorageParams struct { + + /* Request. + + Request body + */ + Request *models.StorageCreateS3IntercoloStorageRequest + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the create s3 intercolo storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateS3IntercoloStorageParams) WithDefaults() *CreateS3IntercoloStorageParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the create s3 intercolo storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateS3IntercoloStorageParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the create s3 intercolo storage params +func (o *CreateS3IntercoloStorageParams) WithTimeout(timeout time.Duration) *CreateS3IntercoloStorageParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the create s3 intercolo storage params +func (o *CreateS3IntercoloStorageParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the create s3 intercolo storage params +func (o *CreateS3IntercoloStorageParams) WithContext(ctx context.Context) *CreateS3IntercoloStorageParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the create s3 intercolo storage params +func (o *CreateS3IntercoloStorageParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the create s3 intercolo storage params +func (o *CreateS3IntercoloStorageParams) WithHTTPClient(client *http.Client) *CreateS3IntercoloStorageParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the create s3 intercolo storage params +func (o 
*CreateS3IntercoloStorageParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRequest adds the request to the create s3 intercolo storage params +func (o *CreateS3IntercoloStorageParams) WithRequest(request *models.StorageCreateS3IntercoloStorageRequest) *CreateS3IntercoloStorageParams { + o.SetRequest(request) + return o +} + +// SetRequest adds the request to the create s3 intercolo storage params +func (o *CreateS3IntercoloStorageParams) SetRequest(request *models.StorageCreateS3IntercoloStorageRequest) { + o.Request = request +} + +// WriteToRequest writes these params to a swagger request +func (o *CreateS3IntercoloStorageParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Request != nil { + if err := r.SetBodyParam(o.Request); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/client/swagger/http/storage/create_s3_intercolo_storage_responses.go b/client/swagger/http/storage/create_s3_intercolo_storage_responses.go new file mode 100644 index 000000000..f730cecf5 --- /dev/null +++ b/client/swagger/http/storage/create_s3_intercolo_storage_responses.go @@ -0,0 +1,259 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// CreateS3IntercoloStorageReader is a Reader for the CreateS3IntercoloStorage structure. +type CreateS3IntercoloStorageReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *CreateS3IntercoloStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewCreateS3IntercoloStorageOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewCreateS3IntercoloStorageBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewCreateS3IntercoloStorageInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("[POST /storage/s3/intercolo] CreateS3IntercoloStorage", response, response.Code()) + } +} + +// NewCreateS3IntercoloStorageOK creates a CreateS3IntercoloStorageOK with default headers values +func NewCreateS3IntercoloStorageOK() *CreateS3IntercoloStorageOK { + return &CreateS3IntercoloStorageOK{} +} + +/* +CreateS3IntercoloStorageOK describes a response with status code 200, with default header values. 
+ +OK +*/ +type CreateS3IntercoloStorageOK struct { + Payload *models.ModelStorage +} + +// IsSuccess returns true when this create s3 intercolo storage o k response has a 2xx status code +func (o *CreateS3IntercoloStorageOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this create s3 intercolo storage o k response has a 3xx status code +func (o *CreateS3IntercoloStorageOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 intercolo storage o k response has a 4xx status code +func (o *CreateS3IntercoloStorageOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 intercolo storage o k response has a 5xx status code +func (o *CreateS3IntercoloStorageOK) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 intercolo storage o k response a status code equal to that given +func (o *CreateS3IntercoloStorageOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the create s3 intercolo storage o k response +func (o *CreateS3IntercoloStorageOK) Code() int { + return 200 +} + +func (o *CreateS3IntercoloStorageOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/intercolo][%d] createS3IntercoloStorageOK %s", 200, payload) +} + +func (o *CreateS3IntercoloStorageOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/intercolo][%d] createS3IntercoloStorageOK %s", 200, payload) +} + +func (o *CreateS3IntercoloStorageOK) GetPayload() *models.ModelStorage { + return o.Payload +} + +func (o *CreateS3IntercoloStorageOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ModelStorage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return 
nil +} + +// NewCreateS3IntercoloStorageBadRequest creates a CreateS3IntercoloStorageBadRequest with default headers values +func NewCreateS3IntercoloStorageBadRequest() *CreateS3IntercoloStorageBadRequest { + return &CreateS3IntercoloStorageBadRequest{} +} + +/* +CreateS3IntercoloStorageBadRequest describes a response with status code 400, with default header values. + +Bad Request +*/ +type CreateS3IntercoloStorageBadRequest struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 intercolo storage bad request response has a 2xx status code +func (o *CreateS3IntercoloStorageBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 intercolo storage bad request response has a 3xx status code +func (o *CreateS3IntercoloStorageBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 intercolo storage bad request response has a 4xx status code +func (o *CreateS3IntercoloStorageBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this create s3 intercolo storage bad request response has a 5xx status code +func (o *CreateS3IntercoloStorageBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 intercolo storage bad request response a status code equal to that given +func (o *CreateS3IntercoloStorageBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the create s3 intercolo storage bad request response +func (o *CreateS3IntercoloStorageBadRequest) Code() int { + return 400 +} + +func (o *CreateS3IntercoloStorageBadRequest) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/intercolo][%d] createS3IntercoloStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3IntercoloStorageBadRequest) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST 
/storage/s3/intercolo][%d] createS3IntercoloStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3IntercoloStorageBadRequest) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3IntercoloStorageBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3IntercoloStorageInternalServerError creates a CreateS3IntercoloStorageInternalServerError with default headers values +func NewCreateS3IntercoloStorageInternalServerError() *CreateS3IntercoloStorageInternalServerError { + return &CreateS3IntercoloStorageInternalServerError{} +} + +/* +CreateS3IntercoloStorageInternalServerError describes a response with status code 500, with default header values. + +Internal Server Error +*/ +type CreateS3IntercoloStorageInternalServerError struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 intercolo storage internal server error response has a 2xx status code +func (o *CreateS3IntercoloStorageInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 intercolo storage internal server error response has a 3xx status code +func (o *CreateS3IntercoloStorageInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 intercolo storage internal server error response has a 4xx status code +func (o *CreateS3IntercoloStorageInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 intercolo storage internal server error response has a 5xx status code +func (o *CreateS3IntercoloStorageInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this create s3 
intercolo storage internal server error response a status code equal to that given +func (o *CreateS3IntercoloStorageInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the create s3 intercolo storage internal server error response +func (o *CreateS3IntercoloStorageInternalServerError) Code() int { + return 500 +} + +func (o *CreateS3IntercoloStorageInternalServerError) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/intercolo][%d] createS3IntercoloStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3IntercoloStorageInternalServerError) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/intercolo][%d] createS3IntercoloStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3IntercoloStorageInternalServerError) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3IntercoloStorageInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/client/swagger/http/storage/create_s3_mega_storage_parameters.go b/client/swagger/http/storage/create_s3_mega_storage_parameters.go new file mode 100644 index 000000000..dc8608482 --- /dev/null +++ b/client/swagger/http/storage/create_s3_mega_storage_parameters.go @@ -0,0 +1,153 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// NewCreateS3MegaStorageParams creates a new CreateS3MegaStorageParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewCreateS3MegaStorageParams() *CreateS3MegaStorageParams { + return &CreateS3MegaStorageParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewCreateS3MegaStorageParamsWithTimeout creates a new CreateS3MegaStorageParams object +// with the ability to set a timeout on a request. +func NewCreateS3MegaStorageParamsWithTimeout(timeout time.Duration) *CreateS3MegaStorageParams { + return &CreateS3MegaStorageParams{ + timeout: timeout, + } +} + +// NewCreateS3MegaStorageParamsWithContext creates a new CreateS3MegaStorageParams object +// with the ability to set a context for a request. +func NewCreateS3MegaStorageParamsWithContext(ctx context.Context) *CreateS3MegaStorageParams { + return &CreateS3MegaStorageParams{ + Context: ctx, + } +} + +// NewCreateS3MegaStorageParamsWithHTTPClient creates a new CreateS3MegaStorageParams object +// with the ability to set a custom HTTPClient for a request. +func NewCreateS3MegaStorageParamsWithHTTPClient(client *http.Client) *CreateS3MegaStorageParams { + return &CreateS3MegaStorageParams{ + HTTPClient: client, + } +} + +/* +CreateS3MegaStorageParams contains all the parameters to send to the API endpoint + + for the create s3 mega storage operation. + + Typically these are written to a http.Request. +*/ +type CreateS3MegaStorageParams struct { + + /* Request. 
+ + Request body + */ + Request *models.StorageCreateS3MegaStorageRequest + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the create s3 mega storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateS3MegaStorageParams) WithDefaults() *CreateS3MegaStorageParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the create s3 mega storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateS3MegaStorageParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the create s3 mega storage params +func (o *CreateS3MegaStorageParams) WithTimeout(timeout time.Duration) *CreateS3MegaStorageParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the create s3 mega storage params +func (o *CreateS3MegaStorageParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the create s3 mega storage params +func (o *CreateS3MegaStorageParams) WithContext(ctx context.Context) *CreateS3MegaStorageParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the create s3 mega storage params +func (o *CreateS3MegaStorageParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the create s3 mega storage params +func (o *CreateS3MegaStorageParams) WithHTTPClient(client *http.Client) *CreateS3MegaStorageParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the create s3 mega storage params +func (o *CreateS3MegaStorageParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRequest adds the request to the create s3 mega storage params +func (o *CreateS3MegaStorageParams) WithRequest(request 
*models.StorageCreateS3MegaStorageRequest) *CreateS3MegaStorageParams { + o.SetRequest(request) + return o +} + +// SetRequest adds the request to the create s3 mega storage params +func (o *CreateS3MegaStorageParams) SetRequest(request *models.StorageCreateS3MegaStorageRequest) { + o.Request = request +} + +// WriteToRequest writes these params to a swagger request +func (o *CreateS3MegaStorageParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Request != nil { + if err := r.SetBodyParam(o.Request); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/client/swagger/http/storage/create_s3_mega_storage_responses.go b/client/swagger/http/storage/create_s3_mega_storage_responses.go new file mode 100644 index 000000000..6c4cc6e88 --- /dev/null +++ b/client/swagger/http/storage/create_s3_mega_storage_responses.go @@ -0,0 +1,259 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// CreateS3MegaStorageReader is a Reader for the CreateS3MegaStorage structure. +type CreateS3MegaStorageReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *CreateS3MegaStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewCreateS3MegaStorageOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewCreateS3MegaStorageBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewCreateS3MegaStorageInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("[POST /storage/s3/mega] CreateS3MegaStorage", response, response.Code()) + } +} + +// NewCreateS3MegaStorageOK creates a CreateS3MegaStorageOK with default headers values +func NewCreateS3MegaStorageOK() *CreateS3MegaStorageOK { + return &CreateS3MegaStorageOK{} +} + +/* +CreateS3MegaStorageOK describes a response with status code 200, with default header values. 
+ +OK +*/ +type CreateS3MegaStorageOK struct { + Payload *models.ModelStorage +} + +// IsSuccess returns true when this create s3 mega storage o k response has a 2xx status code +func (o *CreateS3MegaStorageOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this create s3 mega storage o k response has a 3xx status code +func (o *CreateS3MegaStorageOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 mega storage o k response has a 4xx status code +func (o *CreateS3MegaStorageOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 mega storage o k response has a 5xx status code +func (o *CreateS3MegaStorageOK) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 mega storage o k response a status code equal to that given +func (o *CreateS3MegaStorageOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the create s3 mega storage o k response +func (o *CreateS3MegaStorageOK) Code() int { + return 200 +} + +func (o *CreateS3MegaStorageOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/mega][%d] createS3MegaStorageOK %s", 200, payload) +} + +func (o *CreateS3MegaStorageOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/mega][%d] createS3MegaStorageOK %s", 200, payload) +} + +func (o *CreateS3MegaStorageOK) GetPayload() *models.ModelStorage { + return o.Payload +} + +func (o *CreateS3MegaStorageOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ModelStorage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3MegaStorageBadRequest creates a CreateS3MegaStorageBadRequest with default headers 
values +func NewCreateS3MegaStorageBadRequest() *CreateS3MegaStorageBadRequest { + return &CreateS3MegaStorageBadRequest{} +} + +/* +CreateS3MegaStorageBadRequest describes a response with status code 400, with default header values. + +Bad Request +*/ +type CreateS3MegaStorageBadRequest struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 mega storage bad request response has a 2xx status code +func (o *CreateS3MegaStorageBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 mega storage bad request response has a 3xx status code +func (o *CreateS3MegaStorageBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 mega storage bad request response has a 4xx status code +func (o *CreateS3MegaStorageBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this create s3 mega storage bad request response has a 5xx status code +func (o *CreateS3MegaStorageBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 mega storage bad request response a status code equal to that given +func (o *CreateS3MegaStorageBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the create s3 mega storage bad request response +func (o *CreateS3MegaStorageBadRequest) Code() int { + return 400 +} + +func (o *CreateS3MegaStorageBadRequest) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/mega][%d] createS3MegaStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3MegaStorageBadRequest) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/mega][%d] createS3MegaStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3MegaStorageBadRequest) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3MegaStorageBadRequest) 
readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3MegaStorageInternalServerError creates a CreateS3MegaStorageInternalServerError with default headers values +func NewCreateS3MegaStorageInternalServerError() *CreateS3MegaStorageInternalServerError { + return &CreateS3MegaStorageInternalServerError{} +} + +/* +CreateS3MegaStorageInternalServerError describes a response with status code 500, with default header values. + +Internal Server Error +*/ +type CreateS3MegaStorageInternalServerError struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 mega storage internal server error response has a 2xx status code +func (o *CreateS3MegaStorageInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 mega storage internal server error response has a 3xx status code +func (o *CreateS3MegaStorageInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 mega storage internal server error response has a 4xx status code +func (o *CreateS3MegaStorageInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 mega storage internal server error response has a 5xx status code +func (o *CreateS3MegaStorageInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this create s3 mega storage internal server error response a status code equal to that given +func (o *CreateS3MegaStorageInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the create s3 mega storage internal server error response +func (o 
*CreateS3MegaStorageInternalServerError) Code() int { + return 500 +} + +func (o *CreateS3MegaStorageInternalServerError) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/mega][%d] createS3MegaStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3MegaStorageInternalServerError) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/mega][%d] createS3MegaStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3MegaStorageInternalServerError) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3MegaStorageInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/client/swagger/http/storage/create_s3_o_v_hcloud_storage_parameters.go b/client/swagger/http/storage/create_s3_o_v_hcloud_storage_parameters.go new file mode 100644 index 000000000..fbbf0efcd --- /dev/null +++ b/client/swagger/http/storage/create_s3_o_v_hcloud_storage_parameters.go @@ -0,0 +1,153 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// NewCreateS3OVHcloudStorageParams creates a new CreateS3OVHcloudStorageParams object, +// with the default timeout for this client. 
+// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewCreateS3OVHcloudStorageParams() *CreateS3OVHcloudStorageParams { + return &CreateS3OVHcloudStorageParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewCreateS3OVHcloudStorageParamsWithTimeout creates a new CreateS3OVHcloudStorageParams object +// with the ability to set a timeout on a request. +func NewCreateS3OVHcloudStorageParamsWithTimeout(timeout time.Duration) *CreateS3OVHcloudStorageParams { + return &CreateS3OVHcloudStorageParams{ + timeout: timeout, + } +} + +// NewCreateS3OVHcloudStorageParamsWithContext creates a new CreateS3OVHcloudStorageParams object +// with the ability to set a context for a request. +func NewCreateS3OVHcloudStorageParamsWithContext(ctx context.Context) *CreateS3OVHcloudStorageParams { + return &CreateS3OVHcloudStorageParams{ + Context: ctx, + } +} + +// NewCreateS3OVHcloudStorageParamsWithHTTPClient creates a new CreateS3OVHcloudStorageParams object +// with the ability to set a custom HTTPClient for a request. +func NewCreateS3OVHcloudStorageParamsWithHTTPClient(client *http.Client) *CreateS3OVHcloudStorageParams { + return &CreateS3OVHcloudStorageParams{ + HTTPClient: client, + } +} + +/* +CreateS3OVHcloudStorageParams contains all the parameters to send to the API endpoint + + for the create s3 o v hcloud storage operation. + + Typically these are written to a http.Request. +*/ +type CreateS3OVHcloudStorageParams struct { + + /* Request. + + Request body + */ + Request *models.StorageCreateS3OVHcloudStorageRequest + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the create s3 o v hcloud storage params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *CreateS3OVHcloudStorageParams) WithDefaults() *CreateS3OVHcloudStorageParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the create s3 o v hcloud storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateS3OVHcloudStorageParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the create s3 o v hcloud storage params +func (o *CreateS3OVHcloudStorageParams) WithTimeout(timeout time.Duration) *CreateS3OVHcloudStorageParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the create s3 o v hcloud storage params +func (o *CreateS3OVHcloudStorageParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the create s3 o v hcloud storage params +func (o *CreateS3OVHcloudStorageParams) WithContext(ctx context.Context) *CreateS3OVHcloudStorageParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the create s3 o v hcloud storage params +func (o *CreateS3OVHcloudStorageParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the create s3 o v hcloud storage params +func (o *CreateS3OVHcloudStorageParams) WithHTTPClient(client *http.Client) *CreateS3OVHcloudStorageParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the create s3 o v hcloud storage params +func (o *CreateS3OVHcloudStorageParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRequest adds the request to the create s3 o v hcloud storage params +func (o *CreateS3OVHcloudStorageParams) WithRequest(request *models.StorageCreateS3OVHcloudStorageRequest) *CreateS3OVHcloudStorageParams { + o.SetRequest(request) + return o +} + +// SetRequest adds the request to the create s3 o v hcloud storage params +func (o *CreateS3OVHcloudStorageParams) 
SetRequest(request *models.StorageCreateS3OVHcloudStorageRequest) { + o.Request = request +} + +// WriteToRequest writes these params to a swagger request +func (o *CreateS3OVHcloudStorageParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Request != nil { + if err := r.SetBodyParam(o.Request); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/client/swagger/http/storage/create_s3_o_v_hcloud_storage_responses.go b/client/swagger/http/storage/create_s3_o_v_hcloud_storage_responses.go new file mode 100644 index 000000000..7f384cf2e --- /dev/null +++ b/client/swagger/http/storage/create_s3_o_v_hcloud_storage_responses.go @@ -0,0 +1,259 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// CreateS3OVHcloudStorageReader is a Reader for the CreateS3OVHcloudStorage structure. +type CreateS3OVHcloudStorageReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *CreateS3OVHcloudStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewCreateS3OVHcloudStorageOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewCreateS3OVHcloudStorageBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewCreateS3OVHcloudStorageInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("[POST /storage/s3/ovhcloud] CreateS3OVHcloudStorage", response, response.Code()) + } +} + +// NewCreateS3OVHcloudStorageOK creates a CreateS3OVHcloudStorageOK with default headers values +func NewCreateS3OVHcloudStorageOK() *CreateS3OVHcloudStorageOK { + return &CreateS3OVHcloudStorageOK{} +} + +/* +CreateS3OVHcloudStorageOK describes a response with status code 200, with default header values. 
+ +OK +*/ +type CreateS3OVHcloudStorageOK struct { + Payload *models.ModelStorage +} + +// IsSuccess returns true when this create s3 o v hcloud storage o k response has a 2xx status code +func (o *CreateS3OVHcloudStorageOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this create s3 o v hcloud storage o k response has a 3xx status code +func (o *CreateS3OVHcloudStorageOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 o v hcloud storage o k response has a 4xx status code +func (o *CreateS3OVHcloudStorageOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 o v hcloud storage o k response has a 5xx status code +func (o *CreateS3OVHcloudStorageOK) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 o v hcloud storage o k response a status code equal to that given +func (o *CreateS3OVHcloudStorageOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the create s3 o v hcloud storage o k response +func (o *CreateS3OVHcloudStorageOK) Code() int { + return 200 +} + +func (o *CreateS3OVHcloudStorageOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/ovhcloud][%d] createS3OVHcloudStorageOK %s", 200, payload) +} + +func (o *CreateS3OVHcloudStorageOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/ovhcloud][%d] createS3OVHcloudStorageOK %s", 200, payload) +} + +func (o *CreateS3OVHcloudStorageOK) GetPayload() *models.ModelStorage { + return o.Payload +} + +func (o *CreateS3OVHcloudStorageOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ModelStorage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + 
+// NewCreateS3OVHcloudStorageBadRequest creates a CreateS3OVHcloudStorageBadRequest with default headers values +func NewCreateS3OVHcloudStorageBadRequest() *CreateS3OVHcloudStorageBadRequest { + return &CreateS3OVHcloudStorageBadRequest{} +} + +/* +CreateS3OVHcloudStorageBadRequest describes a response with status code 400, with default header values. + +Bad Request +*/ +type CreateS3OVHcloudStorageBadRequest struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 o v hcloud storage bad request response has a 2xx status code +func (o *CreateS3OVHcloudStorageBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 o v hcloud storage bad request response has a 3xx status code +func (o *CreateS3OVHcloudStorageBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 o v hcloud storage bad request response has a 4xx status code +func (o *CreateS3OVHcloudStorageBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this create s3 o v hcloud storage bad request response has a 5xx status code +func (o *CreateS3OVHcloudStorageBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 o v hcloud storage bad request response a status code equal to that given +func (o *CreateS3OVHcloudStorageBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the create s3 o v hcloud storage bad request response +func (o *CreateS3OVHcloudStorageBadRequest) Code() int { + return 400 +} + +func (o *CreateS3OVHcloudStorageBadRequest) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/ovhcloud][%d] createS3OVHcloudStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3OVHcloudStorageBadRequest) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/ovhcloud][%d] 
createS3OVHcloudStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3OVHcloudStorageBadRequest) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3OVHcloudStorageBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3OVHcloudStorageInternalServerError creates a CreateS3OVHcloudStorageInternalServerError with default headers values +func NewCreateS3OVHcloudStorageInternalServerError() *CreateS3OVHcloudStorageInternalServerError { + return &CreateS3OVHcloudStorageInternalServerError{} +} + +/* +CreateS3OVHcloudStorageInternalServerError describes a response with status code 500, with default header values. + +Internal Server Error +*/ +type CreateS3OVHcloudStorageInternalServerError struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 o v hcloud storage internal server error response has a 2xx status code +func (o *CreateS3OVHcloudStorageInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 o v hcloud storage internal server error response has a 3xx status code +func (o *CreateS3OVHcloudStorageInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 o v hcloud storage internal server error response has a 4xx status code +func (o *CreateS3OVHcloudStorageInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 o v hcloud storage internal server error response has a 5xx status code +func (o *CreateS3OVHcloudStorageInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this create s3 o v hcloud storage internal server error 
response a status code equal to that given +func (o *CreateS3OVHcloudStorageInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the create s3 o v hcloud storage internal server error response +func (o *CreateS3OVHcloudStorageInternalServerError) Code() int { + return 500 +} + +func (o *CreateS3OVHcloudStorageInternalServerError) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/ovhcloud][%d] createS3OVHcloudStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3OVHcloudStorageInternalServerError) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/ovhcloud][%d] createS3OVHcloudStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3OVHcloudStorageInternalServerError) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3OVHcloudStorageInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/client/swagger/http/storage/create_s3_outscale_storage_parameters.go b/client/swagger/http/storage/create_s3_outscale_storage_parameters.go new file mode 100644 index 000000000..3d83913ec --- /dev/null +++ b/client/swagger/http/storage/create_s3_outscale_storage_parameters.go @@ -0,0 +1,153 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// NewCreateS3OutscaleStorageParams creates a new CreateS3OutscaleStorageParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewCreateS3OutscaleStorageParams() *CreateS3OutscaleStorageParams { + return &CreateS3OutscaleStorageParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewCreateS3OutscaleStorageParamsWithTimeout creates a new CreateS3OutscaleStorageParams object +// with the ability to set a timeout on a request. +func NewCreateS3OutscaleStorageParamsWithTimeout(timeout time.Duration) *CreateS3OutscaleStorageParams { + return &CreateS3OutscaleStorageParams{ + timeout: timeout, + } +} + +// NewCreateS3OutscaleStorageParamsWithContext creates a new CreateS3OutscaleStorageParams object +// with the ability to set a context for a request. +func NewCreateS3OutscaleStorageParamsWithContext(ctx context.Context) *CreateS3OutscaleStorageParams { + return &CreateS3OutscaleStorageParams{ + Context: ctx, + } +} + +// NewCreateS3OutscaleStorageParamsWithHTTPClient creates a new CreateS3OutscaleStorageParams object +// with the ability to set a custom HTTPClient for a request. +func NewCreateS3OutscaleStorageParamsWithHTTPClient(client *http.Client) *CreateS3OutscaleStorageParams { + return &CreateS3OutscaleStorageParams{ + HTTPClient: client, + } +} + +/* +CreateS3OutscaleStorageParams contains all the parameters to send to the API endpoint + + for the create s3 outscale storage operation. 
+ + Typically these are written to a http.Request. +*/ +type CreateS3OutscaleStorageParams struct { + + /* Request. + + Request body + */ + Request *models.StorageCreateS3OutscaleStorageRequest + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the create s3 outscale storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateS3OutscaleStorageParams) WithDefaults() *CreateS3OutscaleStorageParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the create s3 outscale storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateS3OutscaleStorageParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the create s3 outscale storage params +func (o *CreateS3OutscaleStorageParams) WithTimeout(timeout time.Duration) *CreateS3OutscaleStorageParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the create s3 outscale storage params +func (o *CreateS3OutscaleStorageParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the create s3 outscale storage params +func (o *CreateS3OutscaleStorageParams) WithContext(ctx context.Context) *CreateS3OutscaleStorageParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the create s3 outscale storage params +func (o *CreateS3OutscaleStorageParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the create s3 outscale storage params +func (o *CreateS3OutscaleStorageParams) WithHTTPClient(client *http.Client) *CreateS3OutscaleStorageParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the create s3 outscale storage params +func (o *CreateS3OutscaleStorageParams) 
SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRequest adds the request to the create s3 outscale storage params +func (o *CreateS3OutscaleStorageParams) WithRequest(request *models.StorageCreateS3OutscaleStorageRequest) *CreateS3OutscaleStorageParams { + o.SetRequest(request) + return o +} + +// SetRequest adds the request to the create s3 outscale storage params +func (o *CreateS3OutscaleStorageParams) SetRequest(request *models.StorageCreateS3OutscaleStorageRequest) { + o.Request = request +} + +// WriteToRequest writes these params to a swagger request +func (o *CreateS3OutscaleStorageParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Request != nil { + if err := r.SetBodyParam(o.Request); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/client/swagger/http/storage/create_s3_outscale_storage_responses.go b/client/swagger/http/storage/create_s3_outscale_storage_responses.go new file mode 100644 index 000000000..cc7add1ac --- /dev/null +++ b/client/swagger/http/storage/create_s3_outscale_storage_responses.go @@ -0,0 +1,259 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// CreateS3OutscaleStorageReader is a Reader for the CreateS3OutscaleStorage structure. +type CreateS3OutscaleStorageReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *CreateS3OutscaleStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewCreateS3OutscaleStorageOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewCreateS3OutscaleStorageBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewCreateS3OutscaleStorageInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("[POST /storage/s3/outscale] CreateS3OutscaleStorage", response, response.Code()) + } +} + +// NewCreateS3OutscaleStorageOK creates a CreateS3OutscaleStorageOK with default headers values +func NewCreateS3OutscaleStorageOK() *CreateS3OutscaleStorageOK { + return &CreateS3OutscaleStorageOK{} +} + +/* +CreateS3OutscaleStorageOK describes a response with status code 200, with default header values. 
+ +OK +*/ +type CreateS3OutscaleStorageOK struct { + Payload *models.ModelStorage +} + +// IsSuccess returns true when this create s3 outscale storage o k response has a 2xx status code +func (o *CreateS3OutscaleStorageOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this create s3 outscale storage o k response has a 3xx status code +func (o *CreateS3OutscaleStorageOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 outscale storage o k response has a 4xx status code +func (o *CreateS3OutscaleStorageOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 outscale storage o k response has a 5xx status code +func (o *CreateS3OutscaleStorageOK) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 outscale storage o k response a status code equal to that given +func (o *CreateS3OutscaleStorageOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the create s3 outscale storage o k response +func (o *CreateS3OutscaleStorageOK) Code() int { + return 200 +} + +func (o *CreateS3OutscaleStorageOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/outscale][%d] createS3OutscaleStorageOK %s", 200, payload) +} + +func (o *CreateS3OutscaleStorageOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/outscale][%d] createS3OutscaleStorageOK %s", 200, payload) +} + +func (o *CreateS3OutscaleStorageOK) GetPayload() *models.ModelStorage { + return o.Payload +} + +func (o *CreateS3OutscaleStorageOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ModelStorage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// 
NewCreateS3OutscaleStorageBadRequest creates a CreateS3OutscaleStorageBadRequest with default headers values +func NewCreateS3OutscaleStorageBadRequest() *CreateS3OutscaleStorageBadRequest { + return &CreateS3OutscaleStorageBadRequest{} +} + +/* +CreateS3OutscaleStorageBadRequest describes a response with status code 400, with default header values. + +Bad Request +*/ +type CreateS3OutscaleStorageBadRequest struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 outscale storage bad request response has a 2xx status code +func (o *CreateS3OutscaleStorageBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 outscale storage bad request response has a 3xx status code +func (o *CreateS3OutscaleStorageBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 outscale storage bad request response has a 4xx status code +func (o *CreateS3OutscaleStorageBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this create s3 outscale storage bad request response has a 5xx status code +func (o *CreateS3OutscaleStorageBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 outscale storage bad request response a status code equal to that given +func (o *CreateS3OutscaleStorageBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the create s3 outscale storage bad request response +func (o *CreateS3OutscaleStorageBadRequest) Code() int { + return 400 +} + +func (o *CreateS3OutscaleStorageBadRequest) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/outscale][%d] createS3OutscaleStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3OutscaleStorageBadRequest) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/outscale][%d] 
createS3OutscaleStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3OutscaleStorageBadRequest) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3OutscaleStorageBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3OutscaleStorageInternalServerError creates a CreateS3OutscaleStorageInternalServerError with default headers values +func NewCreateS3OutscaleStorageInternalServerError() *CreateS3OutscaleStorageInternalServerError { + return &CreateS3OutscaleStorageInternalServerError{} +} + +/* +CreateS3OutscaleStorageInternalServerError describes a response with status code 500, with default header values. + +Internal Server Error +*/ +type CreateS3OutscaleStorageInternalServerError struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 outscale storage internal server error response has a 2xx status code +func (o *CreateS3OutscaleStorageInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 outscale storage internal server error response has a 3xx status code +func (o *CreateS3OutscaleStorageInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 outscale storage internal server error response has a 4xx status code +func (o *CreateS3OutscaleStorageInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 outscale storage internal server error response has a 5xx status code +func (o *CreateS3OutscaleStorageInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this create s3 outscale storage internal server error response 
a status code equal to that given +func (o *CreateS3OutscaleStorageInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the create s3 outscale storage internal server error response +func (o *CreateS3OutscaleStorageInternalServerError) Code() int { + return 500 +} + +func (o *CreateS3OutscaleStorageInternalServerError) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/outscale][%d] createS3OutscaleStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3OutscaleStorageInternalServerError) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/outscale][%d] createS3OutscaleStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3OutscaleStorageInternalServerError) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3OutscaleStorageInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/client/swagger/http/storage/create_s3_rabata_storage_parameters.go b/client/swagger/http/storage/create_s3_rabata_storage_parameters.go new file mode 100644 index 000000000..345e10e7d --- /dev/null +++ b/client/swagger/http/storage/create_s3_rabata_storage_parameters.go @@ -0,0 +1,153 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// NewCreateS3RabataStorageParams creates a new CreateS3RabataStorageParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewCreateS3RabataStorageParams() *CreateS3RabataStorageParams { + return &CreateS3RabataStorageParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewCreateS3RabataStorageParamsWithTimeout creates a new CreateS3RabataStorageParams object +// with the ability to set a timeout on a request. +func NewCreateS3RabataStorageParamsWithTimeout(timeout time.Duration) *CreateS3RabataStorageParams { + return &CreateS3RabataStorageParams{ + timeout: timeout, + } +} + +// NewCreateS3RabataStorageParamsWithContext creates a new CreateS3RabataStorageParams object +// with the ability to set a context for a request. +func NewCreateS3RabataStorageParamsWithContext(ctx context.Context) *CreateS3RabataStorageParams { + return &CreateS3RabataStorageParams{ + Context: ctx, + } +} + +// NewCreateS3RabataStorageParamsWithHTTPClient creates a new CreateS3RabataStorageParams object +// with the ability to set a custom HTTPClient for a request. +func NewCreateS3RabataStorageParamsWithHTTPClient(client *http.Client) *CreateS3RabataStorageParams { + return &CreateS3RabataStorageParams{ + HTTPClient: client, + } +} + +/* +CreateS3RabataStorageParams contains all the parameters to send to the API endpoint + + for the create s3 rabata storage operation. + + Typically these are written to a http.Request. 
+*/ +type CreateS3RabataStorageParams struct { + + /* Request. + + Request body + */ + Request *models.StorageCreateS3RabataStorageRequest + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the create s3 rabata storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateS3RabataStorageParams) WithDefaults() *CreateS3RabataStorageParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the create s3 rabata storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateS3RabataStorageParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the create s3 rabata storage params +func (o *CreateS3RabataStorageParams) WithTimeout(timeout time.Duration) *CreateS3RabataStorageParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the create s3 rabata storage params +func (o *CreateS3RabataStorageParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the create s3 rabata storage params +func (o *CreateS3RabataStorageParams) WithContext(ctx context.Context) *CreateS3RabataStorageParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the create s3 rabata storage params +func (o *CreateS3RabataStorageParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the create s3 rabata storage params +func (o *CreateS3RabataStorageParams) WithHTTPClient(client *http.Client) *CreateS3RabataStorageParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the create s3 rabata storage params +func (o *CreateS3RabataStorageParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRequest adds the request to 
the create s3 rabata storage params +func (o *CreateS3RabataStorageParams) WithRequest(request *models.StorageCreateS3RabataStorageRequest) *CreateS3RabataStorageParams { + o.SetRequest(request) + return o +} + +// SetRequest adds the request to the create s3 rabata storage params +func (o *CreateS3RabataStorageParams) SetRequest(request *models.StorageCreateS3RabataStorageRequest) { + o.Request = request +} + +// WriteToRequest writes these params to a swagger request +func (o *CreateS3RabataStorageParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Request != nil { + if err := r.SetBodyParam(o.Request); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/client/swagger/http/storage/create_s3_rabata_storage_responses.go b/client/swagger/http/storage/create_s3_rabata_storage_responses.go new file mode 100644 index 000000000..3c620c260 --- /dev/null +++ b/client/swagger/http/storage/create_s3_rabata_storage_responses.go @@ -0,0 +1,259 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// CreateS3RabataStorageReader is a Reader for the CreateS3RabataStorage structure. +type CreateS3RabataStorageReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *CreateS3RabataStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewCreateS3RabataStorageOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewCreateS3RabataStorageBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewCreateS3RabataStorageInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("[POST /storage/s3/rabata] CreateS3RabataStorage", response, response.Code()) + } +} + +// NewCreateS3RabataStorageOK creates a CreateS3RabataStorageOK with default headers values +func NewCreateS3RabataStorageOK() *CreateS3RabataStorageOK { + return &CreateS3RabataStorageOK{} +} + +/* +CreateS3RabataStorageOK describes a response with status code 200, with default header values. 
+ +OK +*/ +type CreateS3RabataStorageOK struct { + Payload *models.ModelStorage +} + +// IsSuccess returns true when this create s3 rabata storage o k response has a 2xx status code +func (o *CreateS3RabataStorageOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this create s3 rabata storage o k response has a 3xx status code +func (o *CreateS3RabataStorageOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 rabata storage o k response has a 4xx status code +func (o *CreateS3RabataStorageOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 rabata storage o k response has a 5xx status code +func (o *CreateS3RabataStorageOK) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 rabata storage o k response a status code equal to that given +func (o *CreateS3RabataStorageOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the create s3 rabata storage o k response +func (o *CreateS3RabataStorageOK) Code() int { + return 200 +} + +func (o *CreateS3RabataStorageOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/rabata][%d] createS3RabataStorageOK %s", 200, payload) +} + +func (o *CreateS3RabataStorageOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/rabata][%d] createS3RabataStorageOK %s", 200, payload) +} + +func (o *CreateS3RabataStorageOK) GetPayload() *models.ModelStorage { + return o.Payload +} + +func (o *CreateS3RabataStorageOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ModelStorage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3RabataStorageBadRequest creates a 
CreateS3RabataStorageBadRequest with default headers values +func NewCreateS3RabataStorageBadRequest() *CreateS3RabataStorageBadRequest { + return &CreateS3RabataStorageBadRequest{} +} + +/* +CreateS3RabataStorageBadRequest describes a response with status code 400, with default header values. + +Bad Request +*/ +type CreateS3RabataStorageBadRequest struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 rabata storage bad request response has a 2xx status code +func (o *CreateS3RabataStorageBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 rabata storage bad request response has a 3xx status code +func (o *CreateS3RabataStorageBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 rabata storage bad request response has a 4xx status code +func (o *CreateS3RabataStorageBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this create s3 rabata storage bad request response has a 5xx status code +func (o *CreateS3RabataStorageBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 rabata storage bad request response a status code equal to that given +func (o *CreateS3RabataStorageBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the create s3 rabata storage bad request response +func (o *CreateS3RabataStorageBadRequest) Code() int { + return 400 +} + +func (o *CreateS3RabataStorageBadRequest) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/rabata][%d] createS3RabataStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3RabataStorageBadRequest) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/rabata][%d] createS3RabataStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3RabataStorageBadRequest) GetPayload() 
*models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3RabataStorageBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3RabataStorageInternalServerError creates a CreateS3RabataStorageInternalServerError with default headers values +func NewCreateS3RabataStorageInternalServerError() *CreateS3RabataStorageInternalServerError { + return &CreateS3RabataStorageInternalServerError{} +} + +/* +CreateS3RabataStorageInternalServerError describes a response with status code 500, with default header values. + +Internal Server Error +*/ +type CreateS3RabataStorageInternalServerError struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 rabata storage internal server error response has a 2xx status code +func (o *CreateS3RabataStorageInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 rabata storage internal server error response has a 3xx status code +func (o *CreateS3RabataStorageInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 rabata storage internal server error response has a 4xx status code +func (o *CreateS3RabataStorageInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 rabata storage internal server error response has a 5xx status code +func (o *CreateS3RabataStorageInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this create s3 rabata storage internal server error response a status code equal to that given +func (o *CreateS3RabataStorageInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets 
the status code for the create s3 rabata storage internal server error response +func (o *CreateS3RabataStorageInternalServerError) Code() int { + return 500 +} + +func (o *CreateS3RabataStorageInternalServerError) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/rabata][%d] createS3RabataStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3RabataStorageInternalServerError) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/rabata][%d] createS3RabataStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3RabataStorageInternalServerError) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3RabataStorageInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/client/swagger/http/storage/create_s3_selectel_storage_parameters.go b/client/swagger/http/storage/create_s3_selectel_storage_parameters.go new file mode 100644 index 000000000..c8d536f8f --- /dev/null +++ b/client/swagger/http/storage/create_s3_selectel_storage_parameters.go @@ -0,0 +1,153 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// NewCreateS3SelectelStorageParams creates a new CreateS3SelectelStorageParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewCreateS3SelectelStorageParams() *CreateS3SelectelStorageParams { + return &CreateS3SelectelStorageParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewCreateS3SelectelStorageParamsWithTimeout creates a new CreateS3SelectelStorageParams object +// with the ability to set a timeout on a request. +func NewCreateS3SelectelStorageParamsWithTimeout(timeout time.Duration) *CreateS3SelectelStorageParams { + return &CreateS3SelectelStorageParams{ + timeout: timeout, + } +} + +// NewCreateS3SelectelStorageParamsWithContext creates a new CreateS3SelectelStorageParams object +// with the ability to set a context for a request. +func NewCreateS3SelectelStorageParamsWithContext(ctx context.Context) *CreateS3SelectelStorageParams { + return &CreateS3SelectelStorageParams{ + Context: ctx, + } +} + +// NewCreateS3SelectelStorageParamsWithHTTPClient creates a new CreateS3SelectelStorageParams object +// with the ability to set a custom HTTPClient for a request. +func NewCreateS3SelectelStorageParamsWithHTTPClient(client *http.Client) *CreateS3SelectelStorageParams { + return &CreateS3SelectelStorageParams{ + HTTPClient: client, + } +} + +/* +CreateS3SelectelStorageParams contains all the parameters to send to the API endpoint + + for the create s3 selectel storage operation. 
+ + Typically these are written to a http.Request. +*/ +type CreateS3SelectelStorageParams struct { + + /* Request. + + Request body + */ + Request *models.StorageCreateS3SelectelStorageRequest + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the create s3 selectel storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateS3SelectelStorageParams) WithDefaults() *CreateS3SelectelStorageParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the create s3 selectel storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateS3SelectelStorageParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the create s3 selectel storage params +func (o *CreateS3SelectelStorageParams) WithTimeout(timeout time.Duration) *CreateS3SelectelStorageParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the create s3 selectel storage params +func (o *CreateS3SelectelStorageParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the create s3 selectel storage params +func (o *CreateS3SelectelStorageParams) WithContext(ctx context.Context) *CreateS3SelectelStorageParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the create s3 selectel storage params +func (o *CreateS3SelectelStorageParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the create s3 selectel storage params +func (o *CreateS3SelectelStorageParams) WithHTTPClient(client *http.Client) *CreateS3SelectelStorageParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the create s3 selectel storage params +func (o *CreateS3SelectelStorageParams) 
SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRequest adds the request to the create s3 selectel storage params +func (o *CreateS3SelectelStorageParams) WithRequest(request *models.StorageCreateS3SelectelStorageRequest) *CreateS3SelectelStorageParams { + o.SetRequest(request) + return o +} + +// SetRequest adds the request to the create s3 selectel storage params +func (o *CreateS3SelectelStorageParams) SetRequest(request *models.StorageCreateS3SelectelStorageRequest) { + o.Request = request +} + +// WriteToRequest writes these params to a swagger request +func (o *CreateS3SelectelStorageParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Request != nil { + if err := r.SetBodyParam(o.Request); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/client/swagger/http/storage/create_s3_selectel_storage_responses.go b/client/swagger/http/storage/create_s3_selectel_storage_responses.go new file mode 100644 index 000000000..866cbb34c --- /dev/null +++ b/client/swagger/http/storage/create_s3_selectel_storage_responses.go @@ -0,0 +1,259 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// CreateS3SelectelStorageReader is a Reader for the CreateS3SelectelStorage structure. +type CreateS3SelectelStorageReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *CreateS3SelectelStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewCreateS3SelectelStorageOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewCreateS3SelectelStorageBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewCreateS3SelectelStorageInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("[POST /storage/s3/selectel] CreateS3SelectelStorage", response, response.Code()) + } +} + +// NewCreateS3SelectelStorageOK creates a CreateS3SelectelStorageOK with default headers values +func NewCreateS3SelectelStorageOK() *CreateS3SelectelStorageOK { + return &CreateS3SelectelStorageOK{} +} + +/* +CreateS3SelectelStorageOK describes a response with status code 200, with default header values. 
+ +OK +*/ +type CreateS3SelectelStorageOK struct { + Payload *models.ModelStorage +} + +// IsSuccess returns true when this create s3 selectel storage o k response has a 2xx status code +func (o *CreateS3SelectelStorageOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this create s3 selectel storage o k response has a 3xx status code +func (o *CreateS3SelectelStorageOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 selectel storage o k response has a 4xx status code +func (o *CreateS3SelectelStorageOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 selectel storage o k response has a 5xx status code +func (o *CreateS3SelectelStorageOK) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 selectel storage o k response a status code equal to that given +func (o *CreateS3SelectelStorageOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the create s3 selectel storage o k response +func (o *CreateS3SelectelStorageOK) Code() int { + return 200 +} + +func (o *CreateS3SelectelStorageOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/selectel][%d] createS3SelectelStorageOK %s", 200, payload) +} + +func (o *CreateS3SelectelStorageOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/selectel][%d] createS3SelectelStorageOK %s", 200, payload) +} + +func (o *CreateS3SelectelStorageOK) GetPayload() *models.ModelStorage { + return o.Payload +} + +func (o *CreateS3SelectelStorageOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ModelStorage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// 
NewCreateS3SelectelStorageBadRequest creates a CreateS3SelectelStorageBadRequest with default headers values +func NewCreateS3SelectelStorageBadRequest() *CreateS3SelectelStorageBadRequest { + return &CreateS3SelectelStorageBadRequest{} +} + +/* +CreateS3SelectelStorageBadRequest describes a response with status code 400, with default header values. + +Bad Request +*/ +type CreateS3SelectelStorageBadRequest struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 selectel storage bad request response has a 2xx status code +func (o *CreateS3SelectelStorageBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 selectel storage bad request response has a 3xx status code +func (o *CreateS3SelectelStorageBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 selectel storage bad request response has a 4xx status code +func (o *CreateS3SelectelStorageBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this create s3 selectel storage bad request response has a 5xx status code +func (o *CreateS3SelectelStorageBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 selectel storage bad request response a status code equal to that given +func (o *CreateS3SelectelStorageBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the create s3 selectel storage bad request response +func (o *CreateS3SelectelStorageBadRequest) Code() int { + return 400 +} + +func (o *CreateS3SelectelStorageBadRequest) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/selectel][%d] createS3SelectelStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3SelectelStorageBadRequest) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/selectel][%d] 
createS3SelectelStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3SelectelStorageBadRequest) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3SelectelStorageBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3SelectelStorageInternalServerError creates a CreateS3SelectelStorageInternalServerError with default headers values +func NewCreateS3SelectelStorageInternalServerError() *CreateS3SelectelStorageInternalServerError { + return &CreateS3SelectelStorageInternalServerError{} +} + +/* +CreateS3SelectelStorageInternalServerError describes a response with status code 500, with default header values. + +Internal Server Error +*/ +type CreateS3SelectelStorageInternalServerError struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 selectel storage internal server error response has a 2xx status code +func (o *CreateS3SelectelStorageInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 selectel storage internal server error response has a 3xx status code +func (o *CreateS3SelectelStorageInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 selectel storage internal server error response has a 4xx status code +func (o *CreateS3SelectelStorageInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 selectel storage internal server error response has a 5xx status code +func (o *CreateS3SelectelStorageInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this create s3 selectel storage internal server error response 
a status code equal to that given +func (o *CreateS3SelectelStorageInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the create s3 selectel storage internal server error response +func (o *CreateS3SelectelStorageInternalServerError) Code() int { + return 500 +} + +func (o *CreateS3SelectelStorageInternalServerError) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/selectel][%d] createS3SelectelStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3SelectelStorageInternalServerError) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/selectel][%d] createS3SelectelStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3SelectelStorageInternalServerError) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3SelectelStorageInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/client/swagger/http/storage/create_s3_servercore_storage_parameters.go b/client/swagger/http/storage/create_s3_servercore_storage_parameters.go new file mode 100644 index 000000000..f765d99a8 --- /dev/null +++ b/client/swagger/http/storage/create_s3_servercore_storage_parameters.go @@ -0,0 +1,153 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// NewCreateS3ServercoreStorageParams creates a new CreateS3ServercoreStorageParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewCreateS3ServercoreStorageParams() *CreateS3ServercoreStorageParams { + return &CreateS3ServercoreStorageParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewCreateS3ServercoreStorageParamsWithTimeout creates a new CreateS3ServercoreStorageParams object +// with the ability to set a timeout on a request. +func NewCreateS3ServercoreStorageParamsWithTimeout(timeout time.Duration) *CreateS3ServercoreStorageParams { + return &CreateS3ServercoreStorageParams{ + timeout: timeout, + } +} + +// NewCreateS3ServercoreStorageParamsWithContext creates a new CreateS3ServercoreStorageParams object +// with the ability to set a context for a request. +func NewCreateS3ServercoreStorageParamsWithContext(ctx context.Context) *CreateS3ServercoreStorageParams { + return &CreateS3ServercoreStorageParams{ + Context: ctx, + } +} + +// NewCreateS3ServercoreStorageParamsWithHTTPClient creates a new CreateS3ServercoreStorageParams object +// with the ability to set a custom HTTPClient for a request. 
+func NewCreateS3ServercoreStorageParamsWithHTTPClient(client *http.Client) *CreateS3ServercoreStorageParams { + return &CreateS3ServercoreStorageParams{ + HTTPClient: client, + } +} + +/* +CreateS3ServercoreStorageParams contains all the parameters to send to the API endpoint + + for the create s3 servercore storage operation. + + Typically these are written to a http.Request. +*/ +type CreateS3ServercoreStorageParams struct { + + /* Request. + + Request body + */ + Request *models.StorageCreateS3ServercoreStorageRequest + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the create s3 servercore storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateS3ServercoreStorageParams) WithDefaults() *CreateS3ServercoreStorageParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the create s3 servercore storage params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *CreateS3ServercoreStorageParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the create s3 servercore storage params +func (o *CreateS3ServercoreStorageParams) WithTimeout(timeout time.Duration) *CreateS3ServercoreStorageParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the create s3 servercore storage params +func (o *CreateS3ServercoreStorageParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the create s3 servercore storage params +func (o *CreateS3ServercoreStorageParams) WithContext(ctx context.Context) *CreateS3ServercoreStorageParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the create s3 servercore storage params +func (o *CreateS3ServercoreStorageParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the create s3 servercore storage params +func (o *CreateS3ServercoreStorageParams) WithHTTPClient(client *http.Client) *CreateS3ServercoreStorageParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the create s3 servercore storage params +func (o *CreateS3ServercoreStorageParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRequest adds the request to the create s3 servercore storage params +func (o *CreateS3ServercoreStorageParams) WithRequest(request *models.StorageCreateS3ServercoreStorageRequest) *CreateS3ServercoreStorageParams { + o.SetRequest(request) + return o +} + +// SetRequest adds the request to the create s3 servercore storage params +func (o *CreateS3ServercoreStorageParams) SetRequest(request *models.StorageCreateS3ServercoreStorageRequest) { + o.Request = request +} + +// WriteToRequest writes these params to a swagger request +func (o *CreateS3ServercoreStorageParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { 
+ + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Request != nil { + if err := r.SetBodyParam(o.Request); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/client/swagger/http/storage/create_s3_servercore_storage_responses.go b/client/swagger/http/storage/create_s3_servercore_storage_responses.go new file mode 100644 index 000000000..51c795878 --- /dev/null +++ b/client/swagger/http/storage/create_s3_servercore_storage_responses.go @@ -0,0 +1,259 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// CreateS3ServercoreStorageReader is a Reader for the CreateS3ServercoreStorage structure. +type CreateS3ServercoreStorageReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *CreateS3ServercoreStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewCreateS3ServercoreStorageOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewCreateS3ServercoreStorageBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewCreateS3ServercoreStorageInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("[POST /storage/s3/servercore] CreateS3ServercoreStorage", response, response.Code()) + } +} + +// NewCreateS3ServercoreStorageOK creates a CreateS3ServercoreStorageOK with default headers values +func NewCreateS3ServercoreStorageOK() *CreateS3ServercoreStorageOK { + return &CreateS3ServercoreStorageOK{} +} + +/* +CreateS3ServercoreStorageOK describes a response with status code 200, with default header values. 
+ +OK +*/ +type CreateS3ServercoreStorageOK struct { + Payload *models.ModelStorage +} + +// IsSuccess returns true when this create s3 servercore storage o k response has a 2xx status code +func (o *CreateS3ServercoreStorageOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this create s3 servercore storage o k response has a 3xx status code +func (o *CreateS3ServercoreStorageOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 servercore storage o k response has a 4xx status code +func (o *CreateS3ServercoreStorageOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 servercore storage o k response has a 5xx status code +func (o *CreateS3ServercoreStorageOK) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 servercore storage o k response a status code equal to that given +func (o *CreateS3ServercoreStorageOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the create s3 servercore storage o k response +func (o *CreateS3ServercoreStorageOK) Code() int { + return 200 +} + +func (o *CreateS3ServercoreStorageOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/servercore][%d] createS3ServercoreStorageOK %s", 200, payload) +} + +func (o *CreateS3ServercoreStorageOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/servercore][%d] createS3ServercoreStorageOK %s", 200, payload) +} + +func (o *CreateS3ServercoreStorageOK) GetPayload() *models.ModelStorage { + return o.Payload +} + +func (o *CreateS3ServercoreStorageOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ModelStorage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return 
err + } + + return nil +} + +// NewCreateS3ServercoreStorageBadRequest creates a CreateS3ServercoreStorageBadRequest with default headers values +func NewCreateS3ServercoreStorageBadRequest() *CreateS3ServercoreStorageBadRequest { + return &CreateS3ServercoreStorageBadRequest{} +} + +/* +CreateS3ServercoreStorageBadRequest describes a response with status code 400, with default header values. + +Bad Request +*/ +type CreateS3ServercoreStorageBadRequest struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 servercore storage bad request response has a 2xx status code +func (o *CreateS3ServercoreStorageBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 servercore storage bad request response has a 3xx status code +func (o *CreateS3ServercoreStorageBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 servercore storage bad request response has a 4xx status code +func (o *CreateS3ServercoreStorageBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this create s3 servercore storage bad request response has a 5xx status code +func (o *CreateS3ServercoreStorageBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 servercore storage bad request response a status code equal to that given +func (o *CreateS3ServercoreStorageBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the create s3 servercore storage bad request response +func (o *CreateS3ServercoreStorageBadRequest) Code() int { + return 400 +} + +func (o *CreateS3ServercoreStorageBadRequest) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/servercore][%d] createS3ServercoreStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3ServercoreStorageBadRequest) String() string { + payload, _ := 
json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/servercore][%d] createS3ServercoreStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3ServercoreStorageBadRequest) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3ServercoreStorageBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3ServercoreStorageInternalServerError creates a CreateS3ServercoreStorageInternalServerError with default headers values +func NewCreateS3ServercoreStorageInternalServerError() *CreateS3ServercoreStorageInternalServerError { + return &CreateS3ServercoreStorageInternalServerError{} +} + +/* +CreateS3ServercoreStorageInternalServerError describes a response with status code 500, with default header values. 
+ +Internal Server Error +*/ +type CreateS3ServercoreStorageInternalServerError struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 servercore storage internal server error response has a 2xx status code +func (o *CreateS3ServercoreStorageInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 servercore storage internal server error response has a 3xx status code +func (o *CreateS3ServercoreStorageInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 servercore storage internal server error response has a 4xx status code +func (o *CreateS3ServercoreStorageInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 servercore storage internal server error response has a 5xx status code +func (o *CreateS3ServercoreStorageInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this create s3 servercore storage internal server error response a status code equal to that given +func (o *CreateS3ServercoreStorageInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the create s3 servercore storage internal server error response +func (o *CreateS3ServercoreStorageInternalServerError) Code() int { + return 500 +} + +func (o *CreateS3ServercoreStorageInternalServerError) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/servercore][%d] createS3ServercoreStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3ServercoreStorageInternalServerError) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/servercore][%d] createS3ServercoreStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3ServercoreStorageInternalServerError) GetPayload() *models.APIHTTPError { + return 
o.Payload +} + +func (o *CreateS3ServercoreStorageInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/client/swagger/http/storage/create_s3_spectra_logic_storage_parameters.go b/client/swagger/http/storage/create_s3_spectra_logic_storage_parameters.go new file mode 100644 index 000000000..9d3d16326 --- /dev/null +++ b/client/swagger/http/storage/create_s3_spectra_logic_storage_parameters.go @@ -0,0 +1,153 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// NewCreateS3SpectraLogicStorageParams creates a new CreateS3SpectraLogicStorageParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewCreateS3SpectraLogicStorageParams() *CreateS3SpectraLogicStorageParams { + return &CreateS3SpectraLogicStorageParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewCreateS3SpectraLogicStorageParamsWithTimeout creates a new CreateS3SpectraLogicStorageParams object +// with the ability to set a timeout on a request. 
+func NewCreateS3SpectraLogicStorageParamsWithTimeout(timeout time.Duration) *CreateS3SpectraLogicStorageParams { + return &CreateS3SpectraLogicStorageParams{ + timeout: timeout, + } +} + +// NewCreateS3SpectraLogicStorageParamsWithContext creates a new CreateS3SpectraLogicStorageParams object +// with the ability to set a context for a request. +func NewCreateS3SpectraLogicStorageParamsWithContext(ctx context.Context) *CreateS3SpectraLogicStorageParams { + return &CreateS3SpectraLogicStorageParams{ + Context: ctx, + } +} + +// NewCreateS3SpectraLogicStorageParamsWithHTTPClient creates a new CreateS3SpectraLogicStorageParams object +// with the ability to set a custom HTTPClient for a request. +func NewCreateS3SpectraLogicStorageParamsWithHTTPClient(client *http.Client) *CreateS3SpectraLogicStorageParams { + return &CreateS3SpectraLogicStorageParams{ + HTTPClient: client, + } +} + +/* +CreateS3SpectraLogicStorageParams contains all the parameters to send to the API endpoint + + for the create s3 spectra logic storage operation. + + Typically these are written to a http.Request. +*/ +type CreateS3SpectraLogicStorageParams struct { + + /* Request. + + Request body + */ + Request *models.StorageCreateS3SpectraLogicStorageRequest + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the create s3 spectra logic storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateS3SpectraLogicStorageParams) WithDefaults() *CreateS3SpectraLogicStorageParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the create s3 spectra logic storage params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *CreateS3SpectraLogicStorageParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the create s3 spectra logic storage params +func (o *CreateS3SpectraLogicStorageParams) WithTimeout(timeout time.Duration) *CreateS3SpectraLogicStorageParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the create s3 spectra logic storage params +func (o *CreateS3SpectraLogicStorageParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the create s3 spectra logic storage params +func (o *CreateS3SpectraLogicStorageParams) WithContext(ctx context.Context) *CreateS3SpectraLogicStorageParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the create s3 spectra logic storage params +func (o *CreateS3SpectraLogicStorageParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the create s3 spectra logic storage params +func (o *CreateS3SpectraLogicStorageParams) WithHTTPClient(client *http.Client) *CreateS3SpectraLogicStorageParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the create s3 spectra logic storage params +func (o *CreateS3SpectraLogicStorageParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRequest adds the request to the create s3 spectra logic storage params +func (o *CreateS3SpectraLogicStorageParams) WithRequest(request *models.StorageCreateS3SpectraLogicStorageRequest) *CreateS3SpectraLogicStorageParams { + o.SetRequest(request) + return o +} + +// SetRequest adds the request to the create s3 spectra logic storage params +func (o *CreateS3SpectraLogicStorageParams) SetRequest(request *models.StorageCreateS3SpectraLogicStorageRequest) { + o.Request = request +} + +// WriteToRequest writes these params to a swagger request +func (o *CreateS3SpectraLogicStorageParams) 
WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Request != nil { + if err := r.SetBodyParam(o.Request); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/client/swagger/http/storage/create_s3_spectra_logic_storage_responses.go b/client/swagger/http/storage/create_s3_spectra_logic_storage_responses.go new file mode 100644 index 000000000..a32da88ee --- /dev/null +++ b/client/swagger/http/storage/create_s3_spectra_logic_storage_responses.go @@ -0,0 +1,259 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// CreateS3SpectraLogicStorageReader is a Reader for the CreateS3SpectraLogicStorage structure. +type CreateS3SpectraLogicStorageReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *CreateS3SpectraLogicStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewCreateS3SpectraLogicStorageOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewCreateS3SpectraLogicStorageBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewCreateS3SpectraLogicStorageInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("[POST /storage/s3/spectralogic] CreateS3SpectraLogicStorage", response, response.Code()) + } +} + +// NewCreateS3SpectraLogicStorageOK creates a CreateS3SpectraLogicStorageOK with default headers values +func NewCreateS3SpectraLogicStorageOK() *CreateS3SpectraLogicStorageOK { + return &CreateS3SpectraLogicStorageOK{} +} + +/* +CreateS3SpectraLogicStorageOK describes a response with status code 200, with default header values. 
+ +OK +*/ +type CreateS3SpectraLogicStorageOK struct { + Payload *models.ModelStorage +} + +// IsSuccess returns true when this create s3 spectra logic storage o k response has a 2xx status code +func (o *CreateS3SpectraLogicStorageOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this create s3 spectra logic storage o k response has a 3xx status code +func (o *CreateS3SpectraLogicStorageOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 spectra logic storage o k response has a 4xx status code +func (o *CreateS3SpectraLogicStorageOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 spectra logic storage o k response has a 5xx status code +func (o *CreateS3SpectraLogicStorageOK) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 spectra logic storage o k response a status code equal to that given +func (o *CreateS3SpectraLogicStorageOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the create s3 spectra logic storage o k response +func (o *CreateS3SpectraLogicStorageOK) Code() int { + return 200 +} + +func (o *CreateS3SpectraLogicStorageOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/spectralogic][%d] createS3SpectraLogicStorageOK %s", 200, payload) +} + +func (o *CreateS3SpectraLogicStorageOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/spectralogic][%d] createS3SpectraLogicStorageOK %s", 200, payload) +} + +func (o *CreateS3SpectraLogicStorageOK) GetPayload() *models.ModelStorage { + return o.Payload +} + +func (o *CreateS3SpectraLogicStorageOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ModelStorage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err 
!= nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3SpectraLogicStorageBadRequest creates a CreateS3SpectraLogicStorageBadRequest with default headers values +func NewCreateS3SpectraLogicStorageBadRequest() *CreateS3SpectraLogicStorageBadRequest { + return &CreateS3SpectraLogicStorageBadRequest{} +} + +/* +CreateS3SpectraLogicStorageBadRequest describes a response with status code 400, with default header values. + +Bad Request +*/ +type CreateS3SpectraLogicStorageBadRequest struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 spectra logic storage bad request response has a 2xx status code +func (o *CreateS3SpectraLogicStorageBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 spectra logic storage bad request response has a 3xx status code +func (o *CreateS3SpectraLogicStorageBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 spectra logic storage bad request response has a 4xx status code +func (o *CreateS3SpectraLogicStorageBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this create s3 spectra logic storage bad request response has a 5xx status code +func (o *CreateS3SpectraLogicStorageBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 spectra logic storage bad request response a status code equal to that given +func (o *CreateS3SpectraLogicStorageBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the create s3 spectra logic storage bad request response +func (o *CreateS3SpectraLogicStorageBadRequest) Code() int { + return 400 +} + +func (o *CreateS3SpectraLogicStorageBadRequest) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/spectralogic][%d] createS3SpectraLogicStorageBadRequest %s", 400, payload) +} + 
+func (o *CreateS3SpectraLogicStorageBadRequest) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/spectralogic][%d] createS3SpectraLogicStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3SpectraLogicStorageBadRequest) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3SpectraLogicStorageBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3SpectraLogicStorageInternalServerError creates a CreateS3SpectraLogicStorageInternalServerError with default headers values +func NewCreateS3SpectraLogicStorageInternalServerError() *CreateS3SpectraLogicStorageInternalServerError { + return &CreateS3SpectraLogicStorageInternalServerError{} +} + +/* +CreateS3SpectraLogicStorageInternalServerError describes a response with status code 500, with default header values. 
+ +Internal Server Error +*/ +type CreateS3SpectraLogicStorageInternalServerError struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 spectra logic storage internal server error response has a 2xx status code +func (o *CreateS3SpectraLogicStorageInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 spectra logic storage internal server error response has a 3xx status code +func (o *CreateS3SpectraLogicStorageInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 spectra logic storage internal server error response has a 4xx status code +func (o *CreateS3SpectraLogicStorageInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 spectra logic storage internal server error response has a 5xx status code +func (o *CreateS3SpectraLogicStorageInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this create s3 spectra logic storage internal server error response a status code equal to that given +func (o *CreateS3SpectraLogicStorageInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the create s3 spectra logic storage internal server error response +func (o *CreateS3SpectraLogicStorageInternalServerError) Code() int { + return 500 +} + +func (o *CreateS3SpectraLogicStorageInternalServerError) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/spectralogic][%d] createS3SpectraLogicStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3SpectraLogicStorageInternalServerError) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/spectralogic][%d] createS3SpectraLogicStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3SpectraLogicStorageInternalServerError) 
GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3SpectraLogicStorageInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/client/swagger/http/storage/create_s3_zata_storage_parameters.go b/client/swagger/http/storage/create_s3_zata_storage_parameters.go new file mode 100644 index 000000000..e557d4b5c --- /dev/null +++ b/client/swagger/http/storage/create_s3_zata_storage_parameters.go @@ -0,0 +1,153 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// NewCreateS3ZataStorageParams creates a new CreateS3ZataStorageParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewCreateS3ZataStorageParams() *CreateS3ZataStorageParams { + return &CreateS3ZataStorageParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewCreateS3ZataStorageParamsWithTimeout creates a new CreateS3ZataStorageParams object +// with the ability to set a timeout on a request. 
+func NewCreateS3ZataStorageParamsWithTimeout(timeout time.Duration) *CreateS3ZataStorageParams { + return &CreateS3ZataStorageParams{ + timeout: timeout, + } +} + +// NewCreateS3ZataStorageParamsWithContext creates a new CreateS3ZataStorageParams object +// with the ability to set a context for a request. +func NewCreateS3ZataStorageParamsWithContext(ctx context.Context) *CreateS3ZataStorageParams { + return &CreateS3ZataStorageParams{ + Context: ctx, + } +} + +// NewCreateS3ZataStorageParamsWithHTTPClient creates a new CreateS3ZataStorageParams object +// with the ability to set a custom HTTPClient for a request. +func NewCreateS3ZataStorageParamsWithHTTPClient(client *http.Client) *CreateS3ZataStorageParams { + return &CreateS3ZataStorageParams{ + HTTPClient: client, + } +} + +/* +CreateS3ZataStorageParams contains all the parameters to send to the API endpoint + + for the create s3 zata storage operation. + + Typically these are written to a http.Request. +*/ +type CreateS3ZataStorageParams struct { + + /* Request. + + Request body + */ + Request *models.StorageCreateS3ZataStorageRequest + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the create s3 zata storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateS3ZataStorageParams) WithDefaults() *CreateS3ZataStorageParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the create s3 zata storage params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *CreateS3ZataStorageParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the create s3 zata storage params +func (o *CreateS3ZataStorageParams) WithTimeout(timeout time.Duration) *CreateS3ZataStorageParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the create s3 zata storage params +func (o *CreateS3ZataStorageParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the create s3 zata storage params +func (o *CreateS3ZataStorageParams) WithContext(ctx context.Context) *CreateS3ZataStorageParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the create s3 zata storage params +func (o *CreateS3ZataStorageParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the create s3 zata storage params +func (o *CreateS3ZataStorageParams) WithHTTPClient(client *http.Client) *CreateS3ZataStorageParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the create s3 zata storage params +func (o *CreateS3ZataStorageParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRequest adds the request to the create s3 zata storage params +func (o *CreateS3ZataStorageParams) WithRequest(request *models.StorageCreateS3ZataStorageRequest) *CreateS3ZataStorageParams { + o.SetRequest(request) + return o +} + +// SetRequest adds the request to the create s3 zata storage params +func (o *CreateS3ZataStorageParams) SetRequest(request *models.StorageCreateS3ZataStorageRequest) { + o.Request = request +} + +// WriteToRequest writes these params to a swagger request +func (o *CreateS3ZataStorageParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Request != nil { + if err := 
r.SetBodyParam(o.Request); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/client/swagger/http/storage/create_s3_zata_storage_responses.go b/client/swagger/http/storage/create_s3_zata_storage_responses.go new file mode 100644 index 000000000..df351bc4c --- /dev/null +++ b/client/swagger/http/storage/create_s3_zata_storage_responses.go @@ -0,0 +1,259 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// CreateS3ZataStorageReader is a Reader for the CreateS3ZataStorage structure. +type CreateS3ZataStorageReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *CreateS3ZataStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewCreateS3ZataStorageOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewCreateS3ZataStorageBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewCreateS3ZataStorageInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("[POST /storage/s3/zata] CreateS3ZataStorage", response, response.Code()) + } +} + +// NewCreateS3ZataStorageOK creates a CreateS3ZataStorageOK with default headers values +func NewCreateS3ZataStorageOK() *CreateS3ZataStorageOK { + return &CreateS3ZataStorageOK{} +} + +/* +CreateS3ZataStorageOK describes a response with status code 200, with default header values. 
+ +OK +*/ +type CreateS3ZataStorageOK struct { + Payload *models.ModelStorage +} + +// IsSuccess returns true when this create s3 zata storage o k response has a 2xx status code +func (o *CreateS3ZataStorageOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this create s3 zata storage o k response has a 3xx status code +func (o *CreateS3ZataStorageOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 zata storage o k response has a 4xx status code +func (o *CreateS3ZataStorageOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 zata storage o k response has a 5xx status code +func (o *CreateS3ZataStorageOK) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 zata storage o k response a status code equal to that given +func (o *CreateS3ZataStorageOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the create s3 zata storage o k response +func (o *CreateS3ZataStorageOK) Code() int { + return 200 +} + +func (o *CreateS3ZataStorageOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/zata][%d] createS3ZataStorageOK %s", 200, payload) +} + +func (o *CreateS3ZataStorageOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/zata][%d] createS3ZataStorageOK %s", 200, payload) +} + +func (o *CreateS3ZataStorageOK) GetPayload() *models.ModelStorage { + return o.Payload +} + +func (o *CreateS3ZataStorageOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ModelStorage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3ZataStorageBadRequest creates a CreateS3ZataStorageBadRequest with default headers 
values +func NewCreateS3ZataStorageBadRequest() *CreateS3ZataStorageBadRequest { + return &CreateS3ZataStorageBadRequest{} +} + +/* +CreateS3ZataStorageBadRequest describes a response with status code 400, with default header values. + +Bad Request +*/ +type CreateS3ZataStorageBadRequest struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 zata storage bad request response has a 2xx status code +func (o *CreateS3ZataStorageBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 zata storage bad request response has a 3xx status code +func (o *CreateS3ZataStorageBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 zata storage bad request response has a 4xx status code +func (o *CreateS3ZataStorageBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this create s3 zata storage bad request response has a 5xx status code +func (o *CreateS3ZataStorageBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 zata storage bad request response a status code equal to that given +func (o *CreateS3ZataStorageBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the create s3 zata storage bad request response +func (o *CreateS3ZataStorageBadRequest) Code() int { + return 400 +} + +func (o *CreateS3ZataStorageBadRequest) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/zata][%d] createS3ZataStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3ZataStorageBadRequest) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/zata][%d] createS3ZataStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3ZataStorageBadRequest) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3ZataStorageBadRequest) 
readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3ZataStorageInternalServerError creates a CreateS3ZataStorageInternalServerError with default headers values +func NewCreateS3ZataStorageInternalServerError() *CreateS3ZataStorageInternalServerError { + return &CreateS3ZataStorageInternalServerError{} +} + +/* +CreateS3ZataStorageInternalServerError describes a response with status code 500, with default header values. + +Internal Server Error +*/ +type CreateS3ZataStorageInternalServerError struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 zata storage internal server error response has a 2xx status code +func (o *CreateS3ZataStorageInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 zata storage internal server error response has a 3xx status code +func (o *CreateS3ZataStorageInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 zata storage internal server error response has a 4xx status code +func (o *CreateS3ZataStorageInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 zata storage internal server error response has a 5xx status code +func (o *CreateS3ZataStorageInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this create s3 zata storage internal server error response a status code equal to that given +func (o *CreateS3ZataStorageInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the create s3 zata storage internal server error response +func (o 
*CreateS3ZataStorageInternalServerError) Code() int { + return 500 +} + +func (o *CreateS3ZataStorageInternalServerError) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/zata][%d] createS3ZataStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3ZataStorageInternalServerError) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/zata][%d] createS3ZataStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3ZataStorageInternalServerError) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3ZataStorageInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/client/swagger/http/storage/create_uptobox_storage_parameters.go b/client/swagger/http/storage/create_uptobox_storage_parameters.go deleted file mode 100644 index e8fe7d588..000000000 --- a/client/swagger/http/storage/create_uptobox_storage_parameters.go +++ /dev/null @@ -1,153 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -package storage - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" - - "github.com/data-preservation-programs/singularity/client/swagger/models" -) - -// NewCreateUptoboxStorageParams creates a new CreateUptoboxStorageParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. 
-// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewCreateUptoboxStorageParams() *CreateUptoboxStorageParams { - return &CreateUptoboxStorageParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewCreateUptoboxStorageParamsWithTimeout creates a new CreateUptoboxStorageParams object -// with the ability to set a timeout on a request. -func NewCreateUptoboxStorageParamsWithTimeout(timeout time.Duration) *CreateUptoboxStorageParams { - return &CreateUptoboxStorageParams{ - timeout: timeout, - } -} - -// NewCreateUptoboxStorageParamsWithContext creates a new CreateUptoboxStorageParams object -// with the ability to set a context for a request. -func NewCreateUptoboxStorageParamsWithContext(ctx context.Context) *CreateUptoboxStorageParams { - return &CreateUptoboxStorageParams{ - Context: ctx, - } -} - -// NewCreateUptoboxStorageParamsWithHTTPClient creates a new CreateUptoboxStorageParams object -// with the ability to set a custom HTTPClient for a request. -func NewCreateUptoboxStorageParamsWithHTTPClient(client *http.Client) *CreateUptoboxStorageParams { - return &CreateUptoboxStorageParams{ - HTTPClient: client, - } -} - -/* -CreateUptoboxStorageParams contains all the parameters to send to the API endpoint - - for the create uptobox storage operation. - - Typically these are written to a http.Request. -*/ -type CreateUptoboxStorageParams struct { - - /* Request. - - Request body - */ - Request *models.StorageCreateUptoboxStorageRequest - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the create uptobox storage params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *CreateUptoboxStorageParams) WithDefaults() *CreateUptoboxStorageParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the create uptobox storage params (not the query body). 
-// -// All values with no default are reset to their zero value. -func (o *CreateUptoboxStorageParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the create uptobox storage params -func (o *CreateUptoboxStorageParams) WithTimeout(timeout time.Duration) *CreateUptoboxStorageParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the create uptobox storage params -func (o *CreateUptoboxStorageParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the create uptobox storage params -func (o *CreateUptoboxStorageParams) WithContext(ctx context.Context) *CreateUptoboxStorageParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the create uptobox storage params -func (o *CreateUptoboxStorageParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the create uptobox storage params -func (o *CreateUptoboxStorageParams) WithHTTPClient(client *http.Client) *CreateUptoboxStorageParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the create uptobox storage params -func (o *CreateUptoboxStorageParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithRequest adds the request to the create uptobox storage params -func (o *CreateUptoboxStorageParams) WithRequest(request *models.StorageCreateUptoboxStorageRequest) *CreateUptoboxStorageParams { - o.SetRequest(request) - return o -} - -// SetRequest adds the request to the create uptobox storage params -func (o *CreateUptoboxStorageParams) SetRequest(request *models.StorageCreateUptoboxStorageRequest) { - o.Request = request -} - -// WriteToRequest writes these params to a swagger request -func (o *CreateUptoboxStorageParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return 
err - } - var res []error - if o.Request != nil { - if err := r.SetBodyParam(o.Request); err != nil { - return err - } - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/client/swagger/http/storage/create_uptobox_storage_responses.go b/client/swagger/http/storage/create_uptobox_storage_responses.go deleted file mode 100644 index 3893469bb..000000000 --- a/client/swagger/http/storage/create_uptobox_storage_responses.go +++ /dev/null @@ -1,259 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -package storage - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "encoding/json" - stderrors "errors" - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/data-preservation-programs/singularity/client/swagger/models" -) - -// CreateUptoboxStorageReader is a Reader for the CreateUptoboxStorage structure. -type CreateUptoboxStorageReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. 
-func (o *CreateUptoboxStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { - switch response.Code() { - case 200: - result := NewCreateUptoboxStorageOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 400: - result := NewCreateUptoboxStorageBadRequest() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 500: - result := NewCreateUptoboxStorageInternalServerError() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("[POST /storage/uptobox] CreateUptoboxStorage", response, response.Code()) - } -} - -// NewCreateUptoboxStorageOK creates a CreateUptoboxStorageOK with default headers values -func NewCreateUptoboxStorageOK() *CreateUptoboxStorageOK { - return &CreateUptoboxStorageOK{} -} - -/* -CreateUptoboxStorageOK describes a response with status code 200, with default header values. 
- -OK -*/ -type CreateUptoboxStorageOK struct { - Payload *models.ModelStorage -} - -// IsSuccess returns true when this create uptobox storage o k response has a 2xx status code -func (o *CreateUptoboxStorageOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this create uptobox storage o k response has a 3xx status code -func (o *CreateUptoboxStorageOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this create uptobox storage o k response has a 4xx status code -func (o *CreateUptoboxStorageOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this create uptobox storage o k response has a 5xx status code -func (o *CreateUptoboxStorageOK) IsServerError() bool { - return false -} - -// IsCode returns true when this create uptobox storage o k response a status code equal to that given -func (o *CreateUptoboxStorageOK) IsCode(code int) bool { - return code == 200 -} - -// Code gets the status code for the create uptobox storage o k response -func (o *CreateUptoboxStorageOK) Code() int { - return 200 -} - -func (o *CreateUptoboxStorageOK) Error() string { - payload, _ := json.Marshal(o.Payload) - return fmt.Sprintf("[POST /storage/uptobox][%d] createUptoboxStorageOK %s", 200, payload) -} - -func (o *CreateUptoboxStorageOK) String() string { - payload, _ := json.Marshal(o.Payload) - return fmt.Sprintf("[POST /storage/uptobox][%d] createUptoboxStorageOK %s", 200, payload) -} - -func (o *CreateUptoboxStorageOK) GetPayload() *models.ModelStorage { - return o.Payload -} - -func (o *CreateUptoboxStorageOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.ModelStorage) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { - return err - } - - return nil -} - -// NewCreateUptoboxStorageBadRequest creates a CreateUptoboxStorageBadRequest with 
default headers values -func NewCreateUptoboxStorageBadRequest() *CreateUptoboxStorageBadRequest { - return &CreateUptoboxStorageBadRequest{} -} - -/* -CreateUptoboxStorageBadRequest describes a response with status code 400, with default header values. - -Bad Request -*/ -type CreateUptoboxStorageBadRequest struct { - Payload *models.APIHTTPError -} - -// IsSuccess returns true when this create uptobox storage bad request response has a 2xx status code -func (o *CreateUptoboxStorageBadRequest) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this create uptobox storage bad request response has a 3xx status code -func (o *CreateUptoboxStorageBadRequest) IsRedirect() bool { - return false -} - -// IsClientError returns true when this create uptobox storage bad request response has a 4xx status code -func (o *CreateUptoboxStorageBadRequest) IsClientError() bool { - return true -} - -// IsServerError returns true when this create uptobox storage bad request response has a 5xx status code -func (o *CreateUptoboxStorageBadRequest) IsServerError() bool { - return false -} - -// IsCode returns true when this create uptobox storage bad request response a status code equal to that given -func (o *CreateUptoboxStorageBadRequest) IsCode(code int) bool { - return code == 400 -} - -// Code gets the status code for the create uptobox storage bad request response -func (o *CreateUptoboxStorageBadRequest) Code() int { - return 400 -} - -func (o *CreateUptoboxStorageBadRequest) Error() string { - payload, _ := json.Marshal(o.Payload) - return fmt.Sprintf("[POST /storage/uptobox][%d] createUptoboxStorageBadRequest %s", 400, payload) -} - -func (o *CreateUptoboxStorageBadRequest) String() string { - payload, _ := json.Marshal(o.Payload) - return fmt.Sprintf("[POST /storage/uptobox][%d] createUptoboxStorageBadRequest %s", 400, payload) -} - -func (o *CreateUptoboxStorageBadRequest) GetPayload() *models.APIHTTPError { - return o.Payload -} - -func (o 
*CreateUptoboxStorageBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.APIHTTPError) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { - return err - } - - return nil -} - -// NewCreateUptoboxStorageInternalServerError creates a CreateUptoboxStorageInternalServerError with default headers values -func NewCreateUptoboxStorageInternalServerError() *CreateUptoboxStorageInternalServerError { - return &CreateUptoboxStorageInternalServerError{} -} - -/* -CreateUptoboxStorageInternalServerError describes a response with status code 500, with default header values. - -Internal Server Error -*/ -type CreateUptoboxStorageInternalServerError struct { - Payload *models.APIHTTPError -} - -// IsSuccess returns true when this create uptobox storage internal server error response has a 2xx status code -func (o *CreateUptoboxStorageInternalServerError) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this create uptobox storage internal server error response has a 3xx status code -func (o *CreateUptoboxStorageInternalServerError) IsRedirect() bool { - return false -} - -// IsClientError returns true when this create uptobox storage internal server error response has a 4xx status code -func (o *CreateUptoboxStorageInternalServerError) IsClientError() bool { - return false -} - -// IsServerError returns true when this create uptobox storage internal server error response has a 5xx status code -func (o *CreateUptoboxStorageInternalServerError) IsServerError() bool { - return true -} - -// IsCode returns true when this create uptobox storage internal server error response a status code equal to that given -func (o *CreateUptoboxStorageInternalServerError) IsCode(code int) bool { - return code == 500 -} - -// Code gets the status code for the create uptobox storage internal server error response 
-func (o *CreateUptoboxStorageInternalServerError) Code() int { - return 500 -} - -func (o *CreateUptoboxStorageInternalServerError) Error() string { - payload, _ := json.Marshal(o.Payload) - return fmt.Sprintf("[POST /storage/uptobox][%d] createUptoboxStorageInternalServerError %s", 500, payload) -} - -func (o *CreateUptoboxStorageInternalServerError) String() string { - payload, _ := json.Marshal(o.Payload) - return fmt.Sprintf("[POST /storage/uptobox][%d] createUptoboxStorageInternalServerError %s", 500, payload) -} - -func (o *CreateUptoboxStorageInternalServerError) GetPayload() *models.APIHTTPError { - return o.Payload -} - -func (o *CreateUptoboxStorageInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.APIHTTPError) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { - return err - } - - return nil -} diff --git a/client/swagger/http/storage/storage_client.go b/client/swagger/http/storage/storage_client.go index b7832da08..f5f999eb0 100644 --- a/client/swagger/http/storage/storage_client.go +++ b/client/swagger/http/storage/storage_client.go @@ -130,18 +130,30 @@ type ClientService interface { CreateS3ArvanCloudStorage(params *CreateS3ArvanCloudStorageParams, opts ...ClientOption) (*CreateS3ArvanCloudStorageOK, error) + CreateS3BizflyCloudStorage(params *CreateS3BizflyCloudStorageParams, opts ...ClientOption) (*CreateS3BizflyCloudStorageOK, error) + CreateS3CephStorage(params *CreateS3CephStorageParams, opts ...ClientOption) (*CreateS3CephStorageOK, error) CreateS3ChinaMobileStorage(params *CreateS3ChinaMobileStorageParams, opts ...ClientOption) (*CreateS3ChinaMobileStorageOK, error) CreateS3CloudflareStorage(params *CreateS3CloudflareStorageParams, opts ...ClientOption) (*CreateS3CloudflareStorageOK, error) + CreateS3CubbitStorage(params *CreateS3CubbitStorageParams, opts 
...ClientOption) (*CreateS3CubbitStorageOK, error) + CreateS3DigitalOceanStorage(params *CreateS3DigitalOceanStorageParams, opts ...ClientOption) (*CreateS3DigitalOceanStorageOK, error) CreateS3DreamhostStorage(params *CreateS3DreamhostStorageParams, opts ...ClientOption) (*CreateS3DreamhostStorageOK, error) + CreateS3ExabaStorage(params *CreateS3ExabaStorageParams, opts ...ClientOption) (*CreateS3ExabaStorageOK, error) + + CreateS3FileLuStorage(params *CreateS3FileLuStorageParams, opts ...ClientOption) (*CreateS3FileLuStorageOK, error) + + CreateS3FlashBladeStorage(params *CreateS3FlashBladeStorageParams, opts ...ClientOption) (*CreateS3FlashBladeStorageOK, error) + CreateS3GCSStorage(params *CreateS3GCSStorageParams, opts ...ClientOption) (*CreateS3GCSStorageOK, error) + CreateS3HetznerStorage(params *CreateS3HetznerStorageParams, opts ...ClientOption) (*CreateS3HetznerStorageOK, error) + CreateS3HuaweiOBSStorage(params *CreateS3HuaweiOBSStorageParams, opts ...ClientOption) (*CreateS3HuaweiOBSStorageOK, error) CreateS3IBMCOSStorage(params *CreateS3IBMCOSStorageParams, opts ...ClientOption) (*CreateS3IBMCOSStorageOK, error) @@ -150,6 +162,8 @@ type ClientService interface { CreateS3IONOSStorage(params *CreateS3IONOSStorageParams, opts ...ClientOption) (*CreateS3IONOSStorageOK, error) + CreateS3IntercoloStorage(params *CreateS3IntercoloStorageParams, opts ...ClientOption) (*CreateS3IntercoloStorageOK, error) + CreateS3LeviiaStorage(params *CreateS3LeviiaStorageParams, opts ...ClientOption) (*CreateS3LeviiaStorageOK, error) CreateS3LiaraStorage(params *CreateS3LiaraStorageParams, opts ...ClientOption) (*CreateS3LiaraStorageOK, error) @@ -160,16 +174,24 @@ type ClientService interface { CreateS3MagaluStorage(params *CreateS3MagaluStorageParams, opts ...ClientOption) (*CreateS3MagaluStorageOK, error) + CreateS3MegaStorage(params *CreateS3MegaStorageParams, opts ...ClientOption) (*CreateS3MegaStorageOK, error) + CreateS3MinioStorage(params *CreateS3MinioStorageParams, 
opts ...ClientOption) (*CreateS3MinioStorageOK, error) CreateS3NeteaseStorage(params *CreateS3NeteaseStorageParams, opts ...ClientOption) (*CreateS3NeteaseStorageOK, error) + CreateS3OVHcloudStorage(params *CreateS3OVHcloudStorageParams, opts ...ClientOption) (*CreateS3OVHcloudStorageOK, error) + CreateS3OtherStorage(params *CreateS3OtherStorageParams, opts ...ClientOption) (*CreateS3OtherStorageOK, error) + CreateS3OutscaleStorage(params *CreateS3OutscaleStorageParams, opts ...ClientOption) (*CreateS3OutscaleStorageOK, error) + CreateS3PetaboxStorage(params *CreateS3PetaboxStorageParams, opts ...ClientOption) (*CreateS3PetaboxStorageOK, error) CreateS3QiniuStorage(params *CreateS3QiniuStorageParams, opts ...ClientOption) (*CreateS3QiniuStorageOK, error) + CreateS3RabataStorage(params *CreateS3RabataStorageParams, opts ...ClientOption) (*CreateS3RabataStorageOK, error) + CreateS3RackCorpStorage(params *CreateS3RackCorpStorageParams, opts ...ClientOption) (*CreateS3RackCorpStorageOK, error) CreateS3RcloneStorage(params *CreateS3RcloneStorageParams, opts ...ClientOption) (*CreateS3RcloneStorageOK, error) @@ -178,6 +200,12 @@ type ClientService interface { CreateS3SeaweedFSStorage(params *CreateS3SeaweedFSStorageParams, opts ...ClientOption) (*CreateS3SeaweedFSStorageOK, error) + CreateS3SelectelStorage(params *CreateS3SelectelStorageParams, opts ...ClientOption) (*CreateS3SelectelStorageOK, error) + + CreateS3ServercoreStorage(params *CreateS3ServercoreStorageParams, opts ...ClientOption) (*CreateS3ServercoreStorageOK, error) + + CreateS3SpectraLogicStorage(params *CreateS3SpectraLogicStorageParams, opts ...ClientOption) (*CreateS3SpectraLogicStorageOK, error) + CreateS3StackPathStorage(params *CreateS3StackPathStorageParams, opts ...ClientOption) (*CreateS3StackPathStorageOK, error) CreateS3StorjStorage(params *CreateS3StorjStorageParams, opts ...ClientOption) (*CreateS3StorjStorageOK, error) @@ -188,6 +216,8 @@ type ClientService interface { 
CreateS3WasabiStorage(params *CreateS3WasabiStorageParams, opts ...ClientOption) (*CreateS3WasabiStorageOK, error) + CreateS3ZataStorage(params *CreateS3ZataStorageParams, opts ...ClientOption) (*CreateS3ZataStorageOK, error) + CreateSeafileStorage(params *CreateSeafileStorageParams, opts ...ClientOption) (*CreateSeafileStorageOK, error) CreateSftpStorage(params *CreateSftpStorageParams, opts ...ClientOption) (*CreateSftpStorageOK, error) @@ -208,8 +238,6 @@ type ClientService interface { CreateUnionStorage(params *CreateUnionStorageParams, opts ...ClientOption) (*CreateUnionStorageOK, error) - CreateUptoboxStorage(params *CreateUptoboxStorageParams, opts ...ClientOption) (*CreateUptoboxStorageOK, error) - CreateWebdavStorage(params *CreateWebdavStorageParams, opts ...ClientOption) (*CreateWebdavStorageOK, error) CreateYandexStorage(params *CreateYandexStorageParams, opts ...ClientOption) (*CreateYandexStorageOK, error) @@ -1820,6 +1848,49 @@ func (a *Client) CreateS3ArvanCloudStorage(params *CreateS3ArvanCloudStoragePara panic(msg) } +/* +CreateS3BizflyCloudStorage creates s3 storage with bizfly cloud bizfly cloud simple storage +*/ +func (a *Client) CreateS3BizflyCloudStorage(params *CreateS3BizflyCloudStorageParams, opts ...ClientOption) (*CreateS3BizflyCloudStorageOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewCreateS3BizflyCloudStorageParams() + } + op := &runtime.ClientOperation{ + ID: "CreateS3BizflyCloudStorage", + Method: "POST", + PathPattern: "/storage/s3/bizflycloud", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CreateS3BizflyCloudStorageReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to 
be checked + success, ok := result.(*CreateS3BizflyCloudStorageOK) + if ok { + return success, nil + } + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for CreateS3BizflyCloudStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + /* CreateS3CephStorage creates s3 storage with ceph ceph object storage */ @@ -1950,22 +2021,22 @@ func (a *Client) CreateS3CloudflareStorage(params *CreateS3CloudflareStoragePara } /* -CreateS3DigitalOceanStorage creates s3 storage with digital ocean digital ocean spaces +CreateS3CubbitStorage creates s3 storage with cubbit cubbit d s3 object storage */ -func (a *Client) CreateS3DigitalOceanStorage(params *CreateS3DigitalOceanStorageParams, opts ...ClientOption) (*CreateS3DigitalOceanStorageOK, error) { +func (a *Client) CreateS3CubbitStorage(params *CreateS3CubbitStorageParams, opts ...ClientOption) (*CreateS3CubbitStorageOK, error) { // NOTE: parameters are not validated before sending if params == nil { - params = NewCreateS3DigitalOceanStorageParams() + params = NewCreateS3CubbitStorageParams() } op := &runtime.ClientOperation{ - ID: "CreateS3DigitalOceanStorage", + ID: "CreateS3CubbitStorage", Method: "POST", - PathPattern: "/storage/s3/digitalocean", + PathPattern: "/storage/s3/cubbit", ProducesMediaTypes: []string{"application/json"}, ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http"}, Params: params, - Reader: &CreateS3DigitalOceanStorageReader{formats: a.formats}, + Reader: &CreateS3CubbitStorageReader{formats: a.formats}, Context: params.Context, Client: params.HTTPClient, } @@ -1978,7 +2049,7 @@ func (a *Client) CreateS3DigitalOceanStorage(params *CreateS3DigitalOceanStorage } // only one success response has to be checked 
- success, ok := result.(*CreateS3DigitalOceanStorageOK) + success, ok := result.(*CreateS3CubbitStorageOK) if ok { return success, nil } @@ -1988,27 +2059,27 @@ func (a *Client) CreateS3DigitalOceanStorage(params *CreateS3DigitalOceanStorage // no default response is defined. // // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for CreateS3DigitalOceanStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) + msg := fmt.Sprintf("unexpected success response for CreateS3CubbitStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } /* -CreateS3DreamhostStorage creates s3 storage with dreamhost dreamhost dream objects +CreateS3DigitalOceanStorage creates s3 storage with digital ocean digital ocean spaces */ -func (a *Client) CreateS3DreamhostStorage(params *CreateS3DreamhostStorageParams, opts ...ClientOption) (*CreateS3DreamhostStorageOK, error) { +func (a *Client) CreateS3DigitalOceanStorage(params *CreateS3DigitalOceanStorageParams, opts ...ClientOption) (*CreateS3DigitalOceanStorageOK, error) { // NOTE: parameters are not validated before sending if params == nil { - params = NewCreateS3DreamhostStorageParams() + params = NewCreateS3DigitalOceanStorageParams() } op := &runtime.ClientOperation{ - ID: "CreateS3DreamhostStorage", + ID: "CreateS3DigitalOceanStorage", Method: "POST", - PathPattern: "/storage/s3/dreamhost", + PathPattern: "/storage/s3/digitalocean", ProducesMediaTypes: []string{"application/json"}, ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http"}, Params: params, - Reader: &CreateS3DreamhostStorageReader{formats: a.formats}, + Reader: &CreateS3DigitalOceanStorageReader{formats: a.formats}, Context: params.Context, Client: params.HTTPClient, } @@ -2021,7 +2092,7 @@ func (a *Client) 
CreateS3DreamhostStorage(params *CreateS3DreamhostStorageParams } // only one success response has to be checked - success, ok := result.(*CreateS3DreamhostStorageOK) + success, ok := result.(*CreateS3DigitalOceanStorageOK) if ok { return success, nil } @@ -2031,27 +2102,27 @@ func (a *Client) CreateS3DreamhostStorage(params *CreateS3DreamhostStorageParams // no default response is defined. // // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for CreateS3DreamhostStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) + msg := fmt.Sprintf("unexpected success response for CreateS3DigitalOceanStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } /* -CreateS3GCSStorage creates s3 storage with g c s google cloud storage +CreateS3DreamhostStorage creates s3 storage with dreamhost dreamhost dream objects */ -func (a *Client) CreateS3GCSStorage(params *CreateS3GCSStorageParams, opts ...ClientOption) (*CreateS3GCSStorageOK, error) { +func (a *Client) CreateS3DreamhostStorage(params *CreateS3DreamhostStorageParams, opts ...ClientOption) (*CreateS3DreamhostStorageOK, error) { // NOTE: parameters are not validated before sending if params == nil { - params = NewCreateS3GCSStorageParams() + params = NewCreateS3DreamhostStorageParams() } op := &runtime.ClientOperation{ - ID: "CreateS3GCSStorage", + ID: "CreateS3DreamhostStorage", Method: "POST", - PathPattern: "/storage/s3/gcs", + PathPattern: "/storage/s3/dreamhost", ProducesMediaTypes: []string{"application/json"}, ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http"}, Params: params, - Reader: &CreateS3GCSStorageReader{formats: a.formats}, + Reader: &CreateS3DreamhostStorageReader{formats: a.formats}, Context: params.Context, Client: params.HTTPClient, } @@ -2064,7 
+2135,7 @@ func (a *Client) CreateS3GCSStorage(params *CreateS3GCSStorageParams, opts ...Cl } // only one success response has to be checked - success, ok := result.(*CreateS3GCSStorageOK) + success, ok := result.(*CreateS3DreamhostStorageOK) if ok { return success, nil } @@ -2074,27 +2145,27 @@ func (a *Client) CreateS3GCSStorage(params *CreateS3GCSStorageParams, opts ...Cl // no default response is defined. // // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for CreateS3GCSStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) + msg := fmt.Sprintf("unexpected success response for CreateS3DreamhostStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } /* -CreateS3HuaweiOBSStorage creates s3 storage with huawei o b s huawei object storage service +CreateS3ExabaStorage creates s3 storage with exaba exaba object storage */ -func (a *Client) CreateS3HuaweiOBSStorage(params *CreateS3HuaweiOBSStorageParams, opts ...ClientOption) (*CreateS3HuaweiOBSStorageOK, error) { +func (a *Client) CreateS3ExabaStorage(params *CreateS3ExabaStorageParams, opts ...ClientOption) (*CreateS3ExabaStorageOK, error) { // NOTE: parameters are not validated before sending if params == nil { - params = NewCreateS3HuaweiOBSStorageParams() + params = NewCreateS3ExabaStorageParams() } op := &runtime.ClientOperation{ - ID: "CreateS3HuaweiOBSStorage", + ID: "CreateS3ExabaStorage", Method: "POST", - PathPattern: "/storage/s3/huaweiobs", + PathPattern: "/storage/s3/exaba", ProducesMediaTypes: []string{"application/json"}, ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http"}, Params: params, - Reader: &CreateS3HuaweiOBSStorageReader{formats: a.formats}, + Reader: &CreateS3ExabaStorageReader{formats: a.formats}, Context: params.Context, 
Client: params.HTTPClient, } @@ -2107,7 +2178,7 @@ func (a *Client) CreateS3HuaweiOBSStorage(params *CreateS3HuaweiOBSStorageParams } // only one success response has to be checked - success, ok := result.(*CreateS3HuaweiOBSStorageOK) + success, ok := result.(*CreateS3ExabaStorageOK) if ok { return success, nil } @@ -2117,27 +2188,27 @@ func (a *Client) CreateS3HuaweiOBSStorage(params *CreateS3HuaweiOBSStorageParams // no default response is defined. // // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for CreateS3HuaweiOBSStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) + msg := fmt.Sprintf("unexpected success response for CreateS3ExabaStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } /* -CreateS3IBMCOSStorage creates s3 storage with i b m c o s i b m c o s s3 +CreateS3FileLuStorage creates s3 storage with file lu file lu s5 s3 compatible object storage */ -func (a *Client) CreateS3IBMCOSStorage(params *CreateS3IBMCOSStorageParams, opts ...ClientOption) (*CreateS3IBMCOSStorageOK, error) { +func (a *Client) CreateS3FileLuStorage(params *CreateS3FileLuStorageParams, opts ...ClientOption) (*CreateS3FileLuStorageOK, error) { // NOTE: parameters are not validated before sending if params == nil { - params = NewCreateS3IBMCOSStorageParams() + params = NewCreateS3FileLuStorageParams() } op := &runtime.ClientOperation{ - ID: "CreateS3IBMCOSStorage", + ID: "CreateS3FileLuStorage", Method: "POST", - PathPattern: "/storage/s3/ibmcos", + PathPattern: "/storage/s3/filelu", ProducesMediaTypes: []string{"application/json"}, ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http"}, Params: params, - Reader: &CreateS3IBMCOSStorageReader{formats: a.formats}, + Reader: &CreateS3FileLuStorageReader{formats: 
a.formats}, Context: params.Context, Client: params.HTTPClient, } @@ -2150,7 +2221,7 @@ func (a *Client) CreateS3IBMCOSStorage(params *CreateS3IBMCOSStorageParams, opts } // only one success response has to be checked - success, ok := result.(*CreateS3IBMCOSStorageOK) + success, ok := result.(*CreateS3FileLuStorageOK) if ok { return success, nil } @@ -2160,27 +2231,27 @@ func (a *Client) CreateS3IBMCOSStorage(params *CreateS3IBMCOSStorageParams, opts // no default response is defined. // // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for CreateS3IBMCOSStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) + msg := fmt.Sprintf("unexpected success response for CreateS3FileLuStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } /* -CreateS3IDriveStorage creates s3 storage with i drive i drive e2 +CreateS3FlashBladeStorage creates s3 storage with flash blade pure storage flash blade object storage */ -func (a *Client) CreateS3IDriveStorage(params *CreateS3IDriveStorageParams, opts ...ClientOption) (*CreateS3IDriveStorageOK, error) { +func (a *Client) CreateS3FlashBladeStorage(params *CreateS3FlashBladeStorageParams, opts ...ClientOption) (*CreateS3FlashBladeStorageOK, error) { // NOTE: parameters are not validated before sending if params == nil { - params = NewCreateS3IDriveStorageParams() + params = NewCreateS3FlashBladeStorageParams() } op := &runtime.ClientOperation{ - ID: "CreateS3IDriveStorage", + ID: "CreateS3FlashBladeStorage", Method: "POST", - PathPattern: "/storage/s3/idrive", + PathPattern: "/storage/s3/flashblade", ProducesMediaTypes: []string{"application/json"}, ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http"}, Params: params, - Reader: &CreateS3IDriveStorageReader{formats: a.formats}, 
+ Reader: &CreateS3FlashBladeStorageReader{formats: a.formats}, Context: params.Context, Client: params.HTTPClient, } @@ -2193,7 +2264,7 @@ func (a *Client) CreateS3IDriveStorage(params *CreateS3IDriveStorageParams, opts } // only one success response has to be checked - success, ok := result.(*CreateS3IDriveStorageOK) + success, ok := result.(*CreateS3FlashBladeStorageOK) if ok { return success, nil } @@ -2203,27 +2274,27 @@ func (a *Client) CreateS3IDriveStorage(params *CreateS3IDriveStorageParams, opts // no default response is defined. // // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for CreateS3IDriveStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) + msg := fmt.Sprintf("unexpected success response for CreateS3FlashBladeStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } /* -CreateS3IONOSStorage creates s3 storage with i o n o s i o n o s cloud +CreateS3GCSStorage creates s3 storage with g c s google cloud storage */ -func (a *Client) CreateS3IONOSStorage(params *CreateS3IONOSStorageParams, opts ...ClientOption) (*CreateS3IONOSStorageOK, error) { +func (a *Client) CreateS3GCSStorage(params *CreateS3GCSStorageParams, opts ...ClientOption) (*CreateS3GCSStorageOK, error) { // NOTE: parameters are not validated before sending if params == nil { - params = NewCreateS3IONOSStorageParams() + params = NewCreateS3GCSStorageParams() } op := &runtime.ClientOperation{ - ID: "CreateS3IONOSStorage", + ID: "CreateS3GCSStorage", Method: "POST", - PathPattern: "/storage/s3/ionos", + PathPattern: "/storage/s3/gcs", ProducesMediaTypes: []string{"application/json"}, ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http"}, Params: params, - Reader: &CreateS3IONOSStorageReader{formats: a.formats}, + Reader: 
&CreateS3GCSStorageReader{formats: a.formats}, Context: params.Context, Client: params.HTTPClient, } @@ -2236,7 +2307,7 @@ func (a *Client) CreateS3IONOSStorage(params *CreateS3IONOSStorageParams, opts . } // only one success response has to be checked - success, ok := result.(*CreateS3IONOSStorageOK) + success, ok := result.(*CreateS3GCSStorageOK) if ok { return success, nil } @@ -2246,27 +2317,27 @@ func (a *Client) CreateS3IONOSStorage(params *CreateS3IONOSStorageParams, opts . // no default response is defined. // // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for CreateS3IONOSStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) + msg := fmt.Sprintf("unexpected success response for CreateS3GCSStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } /* -CreateS3LeviiaStorage creates s3 storage with leviia leviia object storage +CreateS3HetznerStorage creates s3 storage with hetzner hetzner object storage */ -func (a *Client) CreateS3LeviiaStorage(params *CreateS3LeviiaStorageParams, opts ...ClientOption) (*CreateS3LeviiaStorageOK, error) { +func (a *Client) CreateS3HetznerStorage(params *CreateS3HetznerStorageParams, opts ...ClientOption) (*CreateS3HetznerStorageOK, error) { // NOTE: parameters are not validated before sending if params == nil { - params = NewCreateS3LeviiaStorageParams() + params = NewCreateS3HetznerStorageParams() } op := &runtime.ClientOperation{ - ID: "CreateS3LeviiaStorage", + ID: "CreateS3HetznerStorage", Method: "POST", - PathPattern: "/storage/s3/leviia", + PathPattern: "/storage/s3/hetzner", ProducesMediaTypes: []string{"application/json"}, ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http"}, Params: params, - Reader: &CreateS3LeviiaStorageReader{formats: a.formats}, + 
Reader: &CreateS3HetznerStorageReader{formats: a.formats}, Context: params.Context, Client: params.HTTPClient, } @@ -2279,7 +2350,7 @@ func (a *Client) CreateS3LeviiaStorage(params *CreateS3LeviiaStorageParams, opts } // only one success response has to be checked - success, ok := result.(*CreateS3LeviiaStorageOK) + success, ok := result.(*CreateS3HetznerStorageOK) if ok { return success, nil } @@ -2289,27 +2360,27 @@ func (a *Client) CreateS3LeviiaStorage(params *CreateS3LeviiaStorageParams, opts // no default response is defined. // // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for CreateS3LeviiaStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) + msg := fmt.Sprintf("unexpected success response for CreateS3HetznerStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } /* -CreateS3LiaraStorage creates s3 storage with liara liara object storage +CreateS3HuaweiOBSStorage creates s3 storage with huawei o b s huawei object storage service */ -func (a *Client) CreateS3LiaraStorage(params *CreateS3LiaraStorageParams, opts ...ClientOption) (*CreateS3LiaraStorageOK, error) { +func (a *Client) CreateS3HuaweiOBSStorage(params *CreateS3HuaweiOBSStorageParams, opts ...ClientOption) (*CreateS3HuaweiOBSStorageOK, error) { // NOTE: parameters are not validated before sending if params == nil { - params = NewCreateS3LiaraStorageParams() + params = NewCreateS3HuaweiOBSStorageParams() } op := &runtime.ClientOperation{ - ID: "CreateS3LiaraStorage", + ID: "CreateS3HuaweiOBSStorage", Method: "POST", - PathPattern: "/storage/s3/liara", + PathPattern: "/storage/s3/huaweiobs", ProducesMediaTypes: []string{"application/json"}, ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http"}, Params: params, - Reader: 
&CreateS3LiaraStorageReader{formats: a.formats}, + Reader: &CreateS3HuaweiOBSStorageReader{formats: a.formats}, Context: params.Context, Client: params.HTTPClient, } @@ -2322,7 +2393,7 @@ func (a *Client) CreateS3LiaraStorage(params *CreateS3LiaraStorageParams, opts . } // only one success response has to be checked - success, ok := result.(*CreateS3LiaraStorageOK) + success, ok := result.(*CreateS3HuaweiOBSStorageOK) if ok { return success, nil } @@ -2332,27 +2403,27 @@ func (a *Client) CreateS3LiaraStorage(params *CreateS3LiaraStorageParams, opts . // no default response is defined. // // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for CreateS3LiaraStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) + msg := fmt.Sprintf("unexpected success response for CreateS3HuaweiOBSStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } /* -CreateS3LinodeStorage creates s3 storage with linode linode object storage +CreateS3IBMCOSStorage creates s3 storage with i b m c o s i b m c o s s3 */ -func (a *Client) CreateS3LinodeStorage(params *CreateS3LinodeStorageParams, opts ...ClientOption) (*CreateS3LinodeStorageOK, error) { +func (a *Client) CreateS3IBMCOSStorage(params *CreateS3IBMCOSStorageParams, opts ...ClientOption) (*CreateS3IBMCOSStorageOK, error) { // NOTE: parameters are not validated before sending if params == nil { - params = NewCreateS3LinodeStorageParams() + params = NewCreateS3IBMCOSStorageParams() } op := &runtime.ClientOperation{ - ID: "CreateS3LinodeStorage", + ID: "CreateS3IBMCOSStorage", Method: "POST", - PathPattern: "/storage/s3/linode", + PathPattern: "/storage/s3/ibmcos", ProducesMediaTypes: []string{"application/json"}, ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http"}, Params: params, - Reader: &CreateS3LinodeStorageReader{formats: a.formats}, + Reader: &CreateS3IBMCOSStorageReader{formats: a.formats}, Context: params.Context, Client: params.HTTPClient, } @@ -2365,7 +2436,7 @@ func (a *Client) CreateS3LinodeStorage(params *CreateS3LinodeStorageParams, opts } // only one success response has to be checked - success, ok := result.(*CreateS3LinodeStorageOK) + success, ok := result.(*CreateS3IBMCOSStorageOK) if ok { return success, nil } @@ -2375,27 +2446,27 @@ func (a *Client) CreateS3LinodeStorage(params *CreateS3LinodeStorageParams, opts // no default response is defined. // // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for CreateS3LinodeStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + msg := fmt.Sprintf("unexpected success response for CreateS3IBMCOSStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } /* -CreateS3LyveCloudStorage creates s3 storage with lyve cloud seagate lyve cloud +CreateS3IDriveStorage creates s3 storage with i drive i drive e2 */ -func (a *Client) CreateS3LyveCloudStorage(params *CreateS3LyveCloudStorageParams, opts ...ClientOption) (*CreateS3LyveCloudStorageOK, error) { +func (a *Client) CreateS3IDriveStorage(params *CreateS3IDriveStorageParams, opts ...ClientOption) (*CreateS3IDriveStorageOK, error) { // NOTE: parameters are not validated before sending if params == nil { - params = NewCreateS3LyveCloudStorageParams() + params = NewCreateS3IDriveStorageParams() } op := &runtime.ClientOperation{ - ID: "CreateS3LyveCloudStorage", + ID: "CreateS3IDriveStorage", Method: "POST", - PathPattern: "/storage/s3/lyvecloud", + PathPattern: "/storage/s3/idrive", ProducesMediaTypes: []string{"application/json"}, ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http"}, Params: params, - Reader: &CreateS3LyveCloudStorageReader{formats: a.formats}, + Reader: &CreateS3IDriveStorageReader{formats: a.formats}, Context: params.Context, Client: params.HTTPClient, } @@ -2408,7 +2479,7 @@ func (a *Client) CreateS3LyveCloudStorage(params *CreateS3LyveCloudStorageParams } // only one success response has to be checked - success, ok := result.(*CreateS3LyveCloudStorageOK) + success, ok := result.(*CreateS3IDriveStorageOK) if ok { return success, nil } @@ -2418,27 +2489,27 @@ func (a *Client) CreateS3LyveCloudStorage(params *CreateS3LyveCloudStorageParams // no default response is defined. 
// // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for CreateS3LyveCloudStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) + msg := fmt.Sprintf("unexpected success response for CreateS3IDriveStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } /* -CreateS3MagaluStorage creates s3 storage with magalu magalu object storage +CreateS3IONOSStorage creates s3 storage with i o n o s i o n o s cloud */ -func (a *Client) CreateS3MagaluStorage(params *CreateS3MagaluStorageParams, opts ...ClientOption) (*CreateS3MagaluStorageOK, error) { +func (a *Client) CreateS3IONOSStorage(params *CreateS3IONOSStorageParams, opts ...ClientOption) (*CreateS3IONOSStorageOK, error) { // NOTE: parameters are not validated before sending if params == nil { - params = NewCreateS3MagaluStorageParams() + params = NewCreateS3IONOSStorageParams() } op := &runtime.ClientOperation{ - ID: "CreateS3MagaluStorage", + ID: "CreateS3IONOSStorage", Method: "POST", - PathPattern: "/storage/s3/magalu", + PathPattern: "/storage/s3/ionos", ProducesMediaTypes: []string{"application/json"}, ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http"}, Params: params, - Reader: &CreateS3MagaluStorageReader{formats: a.formats}, + Reader: &CreateS3IONOSStorageReader{formats: a.formats}, Context: params.Context, Client: params.HTTPClient, } @@ -2451,7 +2522,7 @@ func (a *Client) CreateS3MagaluStorage(params *CreateS3MagaluStorageParams, opts } // only one success response has to be checked - success, ok := result.(*CreateS3MagaluStorageOK) + success, ok := result.(*CreateS3IONOSStorageOK) if ok { return success, nil } @@ -2461,27 +2532,27 @@ func (a *Client) CreateS3MagaluStorage(params *CreateS3MagaluStorageParams, opts // no default response is defined. 
// // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for CreateS3MagaluStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) + msg := fmt.Sprintf("unexpected success response for CreateS3IONOSStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } /* -CreateS3MinioStorage creates s3 storage with minio minio object storage +CreateS3IntercoloStorage creates s3 storage with intercolo intercolo object storage */ -func (a *Client) CreateS3MinioStorage(params *CreateS3MinioStorageParams, opts ...ClientOption) (*CreateS3MinioStorageOK, error) { +func (a *Client) CreateS3IntercoloStorage(params *CreateS3IntercoloStorageParams, opts ...ClientOption) (*CreateS3IntercoloStorageOK, error) { // NOTE: parameters are not validated before sending if params == nil { - params = NewCreateS3MinioStorageParams() + params = NewCreateS3IntercoloStorageParams() } op := &runtime.ClientOperation{ - ID: "CreateS3MinioStorage", + ID: "CreateS3IntercoloStorage", Method: "POST", - PathPattern: "/storage/s3/minio", + PathPattern: "/storage/s3/intercolo", ProducesMediaTypes: []string{"application/json"}, ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http"}, Params: params, - Reader: &CreateS3MinioStorageReader{formats: a.formats}, + Reader: &CreateS3IntercoloStorageReader{formats: a.formats}, Context: params.Context, Client: params.HTTPClient, } @@ -2494,7 +2565,7 @@ func (a *Client) CreateS3MinioStorage(params *CreateS3MinioStorageParams, opts . } // only one success response has to be checked - success, ok := result.(*CreateS3MinioStorageOK) + success, ok := result.(*CreateS3IntercoloStorageOK) if ok { return success, nil } @@ -2504,27 +2575,27 @@ func (a *Client) CreateS3MinioStorage(params *CreateS3MinioStorageParams, opts . 
// no default response is defined. // // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for CreateS3MinioStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) + msg := fmt.Sprintf("unexpected success response for CreateS3IntercoloStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } /* -CreateS3NeteaseStorage creates s3 storage with netease netease object storage n o s +CreateS3LeviiaStorage creates s3 storage with leviia leviia object storage */ -func (a *Client) CreateS3NeteaseStorage(params *CreateS3NeteaseStorageParams, opts ...ClientOption) (*CreateS3NeteaseStorageOK, error) { +func (a *Client) CreateS3LeviiaStorage(params *CreateS3LeviiaStorageParams, opts ...ClientOption) (*CreateS3LeviiaStorageOK, error) { // NOTE: parameters are not validated before sending if params == nil { - params = NewCreateS3NeteaseStorageParams() + params = NewCreateS3LeviiaStorageParams() } op := &runtime.ClientOperation{ - ID: "CreateS3NeteaseStorage", + ID: "CreateS3LeviiaStorage", Method: "POST", - PathPattern: "/storage/s3/netease", + PathPattern: "/storage/s3/leviia", ProducesMediaTypes: []string{"application/json"}, ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http"}, Params: params, - Reader: &CreateS3NeteaseStorageReader{formats: a.formats}, + Reader: &CreateS3LeviiaStorageReader{formats: a.formats}, Context: params.Context, Client: params.HTTPClient, } @@ -2537,7 +2608,7 @@ func (a *Client) CreateS3NeteaseStorage(params *CreateS3NeteaseStorageParams, op } // only one success response has to be checked - success, ok := result.(*CreateS3NeteaseStorageOK) + success, ok := result.(*CreateS3LeviiaStorageOK) if ok { return success, nil } @@ -2547,27 +2618,27 @@ func (a *Client) CreateS3NeteaseStorage(params 
*CreateS3NeteaseStorageParams, op // no default response is defined. // // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for CreateS3NeteaseStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) + msg := fmt.Sprintf("unexpected success response for CreateS3LeviiaStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } /* -CreateS3OtherStorage creates s3 storage with other any other s3 compatible provider +CreateS3LiaraStorage creates s3 storage with liara liara object storage */ -func (a *Client) CreateS3OtherStorage(params *CreateS3OtherStorageParams, opts ...ClientOption) (*CreateS3OtherStorageOK, error) { +func (a *Client) CreateS3LiaraStorage(params *CreateS3LiaraStorageParams, opts ...ClientOption) (*CreateS3LiaraStorageOK, error) { // NOTE: parameters are not validated before sending if params == nil { - params = NewCreateS3OtherStorageParams() + params = NewCreateS3LiaraStorageParams() } op := &runtime.ClientOperation{ - ID: "CreateS3OtherStorage", + ID: "CreateS3LiaraStorage", Method: "POST", - PathPattern: "/storage/s3/other", + PathPattern: "/storage/s3/liara", ProducesMediaTypes: []string{"application/json"}, ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http"}, Params: params, - Reader: &CreateS3OtherStorageReader{formats: a.formats}, + Reader: &CreateS3LiaraStorageReader{formats: a.formats}, Context: params.Context, Client: params.HTTPClient, } @@ -2580,7 +2651,7 @@ func (a *Client) CreateS3OtherStorage(params *CreateS3OtherStorageParams, opts . 
} // only one success response has to be checked - success, ok := result.(*CreateS3OtherStorageOK) + success, ok := result.(*CreateS3LiaraStorageOK) if ok { return success, nil } @@ -2590,27 +2661,27 @@ func (a *Client) CreateS3OtherStorage(params *CreateS3OtherStorageParams, opts . // no default response is defined. // // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for CreateS3OtherStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) + msg := fmt.Sprintf("unexpected success response for CreateS3LiaraStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } /* -CreateS3PetaboxStorage creates s3 storage with petabox petabox object storage +CreateS3LinodeStorage creates s3 storage with linode linode object storage */ -func (a *Client) CreateS3PetaboxStorage(params *CreateS3PetaboxStorageParams, opts ...ClientOption) (*CreateS3PetaboxStorageOK, error) { +func (a *Client) CreateS3LinodeStorage(params *CreateS3LinodeStorageParams, opts ...ClientOption) (*CreateS3LinodeStorageOK, error) { // NOTE: parameters are not validated before sending if params == nil { - params = NewCreateS3PetaboxStorageParams() + params = NewCreateS3LinodeStorageParams() } op := &runtime.ClientOperation{ - ID: "CreateS3PetaboxStorage", + ID: "CreateS3LinodeStorage", Method: "POST", - PathPattern: "/storage/s3/petabox", + PathPattern: "/storage/s3/linode", ProducesMediaTypes: []string{"application/json"}, ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http"}, Params: params, - Reader: &CreateS3PetaboxStorageReader{formats: a.formats}, + Reader: &CreateS3LinodeStorageReader{formats: a.formats}, Context: params.Context, Client: params.HTTPClient, } @@ -2623,7 +2694,394 @@ func (a *Client) CreateS3PetaboxStorage(params 
*CreateS3PetaboxStorageParams, op } // only one success response has to be checked - success, ok := result.(*CreateS3PetaboxStorageOK) + success, ok := result.(*CreateS3LinodeStorageOK) + if ok { + return success, nil + } + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for CreateS3LinodeStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +CreateS3LyveCloudStorage creates s3 storage with lyve cloud seagate lyve cloud +*/ +func (a *Client) CreateS3LyveCloudStorage(params *CreateS3LyveCloudStorageParams, opts ...ClientOption) (*CreateS3LyveCloudStorageOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewCreateS3LyveCloudStorageParams() + } + op := &runtime.ClientOperation{ + ID: "CreateS3LyveCloudStorage", + Method: "POST", + PathPattern: "/storage/s3/lyvecloud", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CreateS3LyveCloudStorageReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*CreateS3LyveCloudStorageOK) + if ok { + return success, nil + } + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for CreateS3LyveCloudStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +CreateS3MagaluStorage creates s3 storage with magalu magalu object storage +*/ +func (a *Client) CreateS3MagaluStorage(params *CreateS3MagaluStorageParams, opts ...ClientOption) (*CreateS3MagaluStorageOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewCreateS3MagaluStorageParams() + } + op := &runtime.ClientOperation{ + ID: "CreateS3MagaluStorage", + Method: "POST", + PathPattern: "/storage/s3/magalu", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CreateS3MagaluStorageReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*CreateS3MagaluStorageOK) + if ok { + return success, nil + } + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for CreateS3MagaluStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +CreateS3MegaStorage creates s3 storage with mega m e g a s4 object storage +*/ +func (a *Client) CreateS3MegaStorage(params *CreateS3MegaStorageParams, opts ...ClientOption) (*CreateS3MegaStorageOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewCreateS3MegaStorageParams() + } + op := &runtime.ClientOperation{ + ID: "CreateS3MegaStorage", + Method: "POST", + PathPattern: "/storage/s3/mega", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CreateS3MegaStorageReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*CreateS3MegaStorageOK) + if ok { + return success, nil + } + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for CreateS3MegaStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +CreateS3MinioStorage creates s3 storage with minio minio object storage +*/ +func (a *Client) CreateS3MinioStorage(params *CreateS3MinioStorageParams, opts ...ClientOption) (*CreateS3MinioStorageOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewCreateS3MinioStorageParams() + } + op := &runtime.ClientOperation{ + ID: "CreateS3MinioStorage", + Method: "POST", + PathPattern: "/storage/s3/minio", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CreateS3MinioStorageReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*CreateS3MinioStorageOK) + if ok { + return success, nil + } + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for CreateS3MinioStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +CreateS3NeteaseStorage creates s3 storage with netease netease object storage n o s +*/ +func (a *Client) CreateS3NeteaseStorage(params *CreateS3NeteaseStorageParams, opts ...ClientOption) (*CreateS3NeteaseStorageOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewCreateS3NeteaseStorageParams() + } + op := &runtime.ClientOperation{ + ID: "CreateS3NeteaseStorage", + Method: "POST", + PathPattern: "/storage/s3/netease", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CreateS3NeteaseStorageReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*CreateS3NeteaseStorageOK) + if ok { + return success, nil + } + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for CreateS3NeteaseStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +CreateS3OVHcloudStorage creates s3 storage with o v hcloud o v hcloud object storage +*/ +func (a *Client) CreateS3OVHcloudStorage(params *CreateS3OVHcloudStorageParams, opts ...ClientOption) (*CreateS3OVHcloudStorageOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewCreateS3OVHcloudStorageParams() + } + op := &runtime.ClientOperation{ + ID: "CreateS3OVHcloudStorage", + Method: "POST", + PathPattern: "/storage/s3/ovhcloud", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CreateS3OVHcloudStorageReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*CreateS3OVHcloudStorageOK) + if ok { + return success, nil + } + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for CreateS3OVHcloudStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +CreateS3OtherStorage creates s3 storage with other any other s3 compatible provider +*/ +func (a *Client) CreateS3OtherStorage(params *CreateS3OtherStorageParams, opts ...ClientOption) (*CreateS3OtherStorageOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewCreateS3OtherStorageParams() + } + op := &runtime.ClientOperation{ + ID: "CreateS3OtherStorage", + Method: "POST", + PathPattern: "/storage/s3/other", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CreateS3OtherStorageReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*CreateS3OtherStorageOK) + if ok { + return success, nil + } + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for CreateS3OtherStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +CreateS3OutscaleStorage creates s3 storage with outscale o u t s c a l e object storage o o s +*/ +func (a *Client) CreateS3OutscaleStorage(params *CreateS3OutscaleStorageParams, opts ...ClientOption) (*CreateS3OutscaleStorageOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewCreateS3OutscaleStorageParams() + } + op := &runtime.ClientOperation{ + ID: "CreateS3OutscaleStorage", + Method: "POST", + PathPattern: "/storage/s3/outscale", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CreateS3OutscaleStorageReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*CreateS3OutscaleStorageOK) + if ok { + return success, nil + } + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for CreateS3OutscaleStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +CreateS3PetaboxStorage creates s3 storage with petabox petabox object storage +*/ +func (a *Client) CreateS3PetaboxStorage(params *CreateS3PetaboxStorageParams, opts ...ClientOption) (*CreateS3PetaboxStorageOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewCreateS3PetaboxStorageParams() + } + op := &runtime.ClientOperation{ + ID: "CreateS3PetaboxStorage", + Method: "POST", + PathPattern: "/storage/s3/petabox", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CreateS3PetaboxStorageReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*CreateS3PetaboxStorageOK) if ok { return success, nil } @@ -2680,6 +3138,49 @@ func (a *Client) CreateS3QiniuStorage(params *CreateS3QiniuStorageParams, opts . 
panic(msg) } +/* +CreateS3RabataStorage creates s3 storage with rabata rabata cloud storage +*/ +func (a *Client) CreateS3RabataStorage(params *CreateS3RabataStorageParams, opts ...ClientOption) (*CreateS3RabataStorageOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewCreateS3RabataStorageParams() + } + op := &runtime.ClientOperation{ + ID: "CreateS3RabataStorage", + Method: "POST", + PathPattern: "/storage/s3/rabata", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CreateS3RabataStorageReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*CreateS3RabataStorageOK) + if ok { + return success, nil + } + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for CreateS3RabataStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + /* CreateS3RackCorpStorage creates s3 storage with rack corp rack corp object storage */ @@ -2852,6 +3353,135 @@ func (a *Client) CreateS3SeaweedFSStorage(params *CreateS3SeaweedFSStorageParams panic(msg) } +/* +CreateS3SelectelStorage creates s3 storage with selectel selectel object storage +*/ +func (a *Client) CreateS3SelectelStorage(params *CreateS3SelectelStorageParams, opts ...ClientOption) (*CreateS3SelectelStorageOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewCreateS3SelectelStorageParams() + } + op := &runtime.ClientOperation{ + ID: "CreateS3SelectelStorage", + Method: "POST", + PathPattern: "/storage/s3/selectel", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CreateS3SelectelStorageReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*CreateS3SelectelStorageOK) + if ok { + return success, nil + } + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for CreateS3SelectelStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +CreateS3ServercoreStorage creates s3 storage with servercore servercore object storage +*/ +func (a *Client) CreateS3ServercoreStorage(params *CreateS3ServercoreStorageParams, opts ...ClientOption) (*CreateS3ServercoreStorageOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewCreateS3ServercoreStorageParams() + } + op := &runtime.ClientOperation{ + ID: "CreateS3ServercoreStorage", + Method: "POST", + PathPattern: "/storage/s3/servercore", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CreateS3ServercoreStorageReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*CreateS3ServercoreStorageOK) + if ok { + return success, nil + } + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for CreateS3ServercoreStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +CreateS3SpectraLogicStorage creates s3 storage with spectra logic spectra logic black pearl +*/ +func (a *Client) CreateS3SpectraLogicStorage(params *CreateS3SpectraLogicStorageParams, opts ...ClientOption) (*CreateS3SpectraLogicStorageOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewCreateS3SpectraLogicStorageParams() + } + op := &runtime.ClientOperation{ + ID: "CreateS3SpectraLogicStorage", + Method: "POST", + PathPattern: "/storage/s3/spectralogic", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CreateS3SpectraLogicStorageReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*CreateS3SpectraLogicStorageOK) + if ok { + return success, nil + } + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for CreateS3SpectraLogicStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + /* CreateS3StackPathStorage creates s3 storage with stack path stack path object storage */ @@ -3067,6 +3697,49 @@ func (a *Client) CreateS3WasabiStorage(params *CreateS3WasabiStorageParams, opts panic(msg) } +/* +CreateS3ZataStorage creates s3 storage with zata zata s3 compatible gateway +*/ +func (a *Client) CreateS3ZataStorage(params *CreateS3ZataStorageParams, opts ...ClientOption) (*CreateS3ZataStorageOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewCreateS3ZataStorageParams() + } + op := &runtime.ClientOperation{ + ID: "CreateS3ZataStorage", + Method: "POST", + PathPattern: "/storage/s3/zata", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CreateS3ZataStorageReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*CreateS3ZataStorageOK) + if ok { + return success, nil + } + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for CreateS3ZataStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + /* CreateSeafileStorage creates seafile storage */ @@ -3497,49 +4170,6 @@ func (a *Client) CreateUnionStorage(params *CreateUnionStorageParams, opts ...Cl panic(msg) } -/* -CreateUptoboxStorage creates uptobox storage -*/ -func (a *Client) CreateUptoboxStorage(params *CreateUptoboxStorageParams, opts ...ClientOption) (*CreateUptoboxStorageOK, error) { - // NOTE: parameters are not validated before sending - if params == nil { - params = NewCreateUptoboxStorageParams() - } - op := &runtime.ClientOperation{ - ID: "CreateUptoboxStorage", - Method: "POST", - PathPattern: "/storage/uptobox", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &CreateUptoboxStorageReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - - // only one success response has to be checked - success, ok := result.(*CreateUptoboxStorageOK) - if ok { - return success, nil - } - - // unexpected success response. - - // no default response is defined. - // - // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for CreateUptoboxStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) - panic(msg) -} - /* CreateWebdavStorage creates webdav storage */ diff --git a/client/swagger/models/storage_azureblob_config.go b/client/swagger/models/storage_azureblob_config.go index 72f58db98..83fa9b8e4 100644 --- a/client/swagger/models/storage_azureblob_config.go +++ b/client/swagger/models/storage_azureblob_config.go @@ -44,6 +44,15 @@ type StorageAzureblobConfig struct { // Send the certificate chain when using certificate auth. ClientSendCertificateChain *bool `json:"clientSendCertificateChain,omitempty"` + // Storage Connection String. + ConnectionString string `json:"connectionString,omitempty"` + + // Concurrency for multipart copy. + CopyConcurrency *int64 `json:"copyConcurrency,omitempty"` + + // Cutoff for switching to multipart copy. + CopyCutoff *string `json:"copyCutoff,omitempty"` + // Set to specify how to deal with snapshots on blob deletion. DeleteSnapshots string `json:"deleteSnapshots,omitempty"` @@ -56,6 +65,9 @@ type StorageAzureblobConfig struct { // Don't store MD5 checksum with object metadata. DisableChecksum *bool `json:"disableChecksum,omitempty"` + // Skip requesting Microsoft Entra instance metadata + DisableInstanceDiscovery *bool `json:"disableInstanceDiscovery,omitempty"` + // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` @@ -113,6 +125,12 @@ type StorageAzureblobConfig struct { // Cutoff for switching to chunked upload (<= 256 MiB) (deprecated). UploadCutoff string `json:"uploadCutoff,omitempty"` + // Use Azure CLI tool az for authentication + UseAz *bool `json:"useAz,omitempty"` + + // Whether to use the Copy Blob API when copying to the same storage account. + UseCopyBlob *bool `json:"useCopyBlob,omitempty"` + // Uses local storage emulator if provided as 'true'. 
UseEmulator *bool `json:"useEmulator,omitempty"` diff --git a/client/swagger/models/storage_b2_config.go b/client/swagger/models/storage_b2_config.go index 8ae0acb60..448725326 100644 --- a/client/swagger/models/storage_b2_config.go +++ b/client/swagger/models/storage_b2_config.go @@ -59,6 +59,18 @@ type StorageB2Config struct { // Whether to use mmap buffers in internal memory pool. (no longer used) MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` + // If using SSE-C, the server-side encryption algorithm used when storing this object in B2. + SseCustomerAlgorithm string `json:"sseCustomerAlgorithm,omitempty"` + + // To use SSE-C, you may provide the secret encryption key encoded in a UTF-8 compatible string to encrypt/decrypt your data + SseCustomerKey string `json:"sseCustomerKey,omitempty"` + + // To use SSE-C, you may provide the secret encryption key encoded in Base64 format to encrypt/decrypt your data + SseCustomerKeyBase64 string `json:"sseCustomerKeyBase64,omitempty"` + + // If using SSE-C you may provide the secret encryption key MD5 checksum (optional). + SseCustomerKeyMd5 string `json:"sseCustomerKeyMd5,omitempty"` + // A flag string for X-Bz-Test-Mode header for debugging. TestMode string `json:"testMode,omitempty"` diff --git a/client/swagger/models/storage_box_config.go b/client/swagger/models/storage_box_config.go index 32d445fe8..bb22218bf 100644 --- a/client/swagger/models/storage_box_config.go +++ b/client/swagger/models/storage_box_config.go @@ -30,6 +30,9 @@ type StorageBoxConfig struct { // Example: user BoxSubType *string `json:"boxSubType,omitempty"` + // Use client credentials OAuth flow. + ClientCredentials *bool `json:"clientCredentials,omitempty"` + // OAuth Client Id. ClientID string `json:"clientId,omitempty"` @@ -39,6 +42,9 @@ type StorageBoxConfig struct { // Max number of times to try committing a multipart file. CommitRetries *int64 `json:"commitRetries,omitempty"` + // Box App config.json contents. 
+ ConfigCredentials string `json:"configCredentials,omitempty"` + // Description of the remote. Description string `json:"description,omitempty"` diff --git a/client/swagger/models/storage_create_internetarchive_storage_request.go b/client/swagger/models/storage_create_internetarchive_storage_request.go index e1baaa50e..956e23602 100644 --- a/client/swagger/models/storage_create_internetarchive_storage_request.go +++ b/client/swagger/models/storage_create_internetarchive_storage_request.go @@ -5,113 +5,7 @@ package models // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command -import ( - "context" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" -) - // StorageCreateInternetarchiveStorageRequest storage create internetarchive storage request // // swagger:model storage.createInternetarchiveStorageRequest -type StorageCreateInternetarchiveStorageRequest struct { - - // config for underlying HTTP client - ClientConfig struct { - ModelClientConfig - } `json:"clientConfig,omitempty"` - - // config for the storage - Config struct { - StorageInternetarchiveConfig - } `json:"config,omitempty"` - - // Name of the storage, must be unique - // Example: my-storage - Name string `json:"name,omitempty"` - - // Path of the storage - Path string `json:"path,omitempty"` -} - -// Validate validates this storage create internetarchive storage request -func (m *StorageCreateInternetarchiveStorageRequest) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateClientConfig(formats); err != nil { - res = append(res, err) - } - - if err := m.validateConfig(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *StorageCreateInternetarchiveStorageRequest) validateClientConfig(formats strfmt.Registry) error { - if swag.IsZero(m.ClientConfig) { // not required - return nil - } - - return nil -} - -func (m *StorageCreateInternetarchiveStorageRequest) validateConfig(formats strfmt.Registry) error { - if swag.IsZero(m.Config) { // not required - return nil - } - - return nil -} - -// ContextValidate validate this storage create internetarchive storage request based on the context it is used -func (m *StorageCreateInternetarchiveStorageRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateClientConfig(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateConfig(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *StorageCreateInternetarchiveStorageRequest) contextValidateClientConfig(ctx context.Context, formats strfmt.Registry) error { - - return nil -} - -func (m *StorageCreateInternetarchiveStorageRequest) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error { - - return nil -} - -// MarshalBinary interface implementation -func (m *StorageCreateInternetarchiveStorageRequest) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *StorageCreateInternetarchiveStorageRequest) UnmarshalBinary(b []byte) error { - var res StorageCreateInternetarchiveStorageRequest - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} +type StorageCreateInternetarchiveStorageRequest any diff --git a/client/swagger/models/storage_create_s3_bizfly_cloud_storage_request.go b/client/swagger/models/storage_create_s3_bizfly_cloud_storage_request.go new file mode 100644 index 000000000..481264614 --- 
/dev/null +++ b/client/swagger/models/storage_create_s3_bizfly_cloud_storage_request.go @@ -0,0 +1,117 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageCreateS3BizflyCloudStorageRequest storage create s3 bizfly cloud storage request +// +// swagger:model storage.createS3BizflyCloudStorageRequest +type StorageCreateS3BizflyCloudStorageRequest struct { + + // config for underlying HTTP client + ClientConfig struct { + ModelClientConfig + } `json:"clientConfig,omitempty"` + + // config for the storage + Config struct { + StorageS3BizflyCloudConfig + } `json:"config,omitempty"` + + // Name of the storage, must be unique + // Example: my-storage + Name string `json:"name,omitempty"` + + // Path of the storage + Path string `json:"path,omitempty"` +} + +// Validate validates this storage create s3 bizfly cloud storage request +func (m *StorageCreateS3BizflyCloudStorageRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateClientConfig(formats); err != nil { + res = append(res, err) + } + + if err := m.validateConfig(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *StorageCreateS3BizflyCloudStorageRequest) validateClientConfig(formats strfmt.Registry) error { + if swag.IsZero(m.ClientConfig) { // not required + return nil + } + + return nil +} + +func (m *StorageCreateS3BizflyCloudStorageRequest) validateConfig(formats strfmt.Registry) error { + if swag.IsZero(m.Config) { // not required + return nil + } + + return nil +} + +// ContextValidate validate this storage create s3 bizfly cloud storage request based on the context it is used +func (m *StorageCreateS3BizflyCloudStorageRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateClientConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *StorageCreateS3BizflyCloudStorageRequest) contextValidateClientConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +func (m *StorageCreateS3BizflyCloudStorageRequest) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +// MarshalBinary interface implementation +func (m *StorageCreateS3BizflyCloudStorageRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageCreateS3BizflyCloudStorageRequest) UnmarshalBinary(b []byte) error { + var res StorageCreateS3BizflyCloudStorageRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_create_s3_cubbit_storage_request.go b/client/swagger/models/storage_create_s3_cubbit_storage_request.go new file mode 100644 index 000000000..305497535 --- /dev/null +++ b/client/swagger/models/storage_create_s3_cubbit_storage_request.go @@ 
-0,0 +1,117 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageCreateS3CubbitStorageRequest storage create s3 cubbit storage request +// +// swagger:model storage.createS3CubbitStorageRequest +type StorageCreateS3CubbitStorageRequest struct { + + // config for underlying HTTP client + ClientConfig struct { + ModelClientConfig + } `json:"clientConfig,omitempty"` + + // config for the storage + Config struct { + StorageS3CubbitConfig + } `json:"config,omitempty"` + + // Name of the storage, must be unique + // Example: my-storage + Name string `json:"name,omitempty"` + + // Path of the storage + Path string `json:"path,omitempty"` +} + +// Validate validates this storage create s3 cubbit storage request +func (m *StorageCreateS3CubbitStorageRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateClientConfig(formats); err != nil { + res = append(res, err) + } + + if err := m.validateConfig(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *StorageCreateS3CubbitStorageRequest) validateClientConfig(formats strfmt.Registry) error { + if swag.IsZero(m.ClientConfig) { // not required + return nil + } + + return nil +} + +func (m *StorageCreateS3CubbitStorageRequest) validateConfig(formats strfmt.Registry) error { + if swag.IsZero(m.Config) { // not required + return nil + } + + return nil +} + +// ContextValidate validate this storage create s3 cubbit storage request based on the context it is used +func (m *StorageCreateS3CubbitStorageRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateClientConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *StorageCreateS3CubbitStorageRequest) contextValidateClientConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +func (m *StorageCreateS3CubbitStorageRequest) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +// MarshalBinary interface implementation +func (m *StorageCreateS3CubbitStorageRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageCreateS3CubbitStorageRequest) UnmarshalBinary(b []byte) error { + var res StorageCreateS3CubbitStorageRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_create_uptobox_storage_request.go b/client/swagger/models/storage_create_s3_exaba_storage_request.go similarity index 67% rename from client/swagger/models/storage_create_uptobox_storage_request.go rename to client/swagger/models/storage_create_s3_exaba_storage_request.go index 
70758fa8b..b48b89eb2 100644 --- a/client/swagger/models/storage_create_uptobox_storage_request.go +++ b/client/swagger/models/storage_create_s3_exaba_storage_request.go @@ -13,10 +13,10 @@ import ( "github.com/go-openapi/swag" ) -// StorageCreateUptoboxStorageRequest storage create uptobox storage request +// StorageCreateS3ExabaStorageRequest storage create s3 exaba storage request // -// swagger:model storage.createUptoboxStorageRequest -type StorageCreateUptoboxStorageRequest struct { +// swagger:model storage.createS3ExabaStorageRequest +type StorageCreateS3ExabaStorageRequest struct { // config for underlying HTTP client ClientConfig struct { @@ -25,7 +25,7 @@ type StorageCreateUptoboxStorageRequest struct { // config for the storage Config struct { - StorageUptoboxConfig + StorageS3ExabaConfig } `json:"config,omitempty"` // Name of the storage, must be unique @@ -36,8 +36,8 @@ type StorageCreateUptoboxStorageRequest struct { Path string `json:"path,omitempty"` } -// Validate validates this storage create uptobox storage request -func (m *StorageCreateUptoboxStorageRequest) Validate(formats strfmt.Registry) error { +// Validate validates this storage create s3 exaba storage request +func (m *StorageCreateS3ExabaStorageRequest) Validate(formats strfmt.Registry) error { var res []error if err := m.validateClientConfig(formats); err != nil { @@ -54,7 +54,7 @@ func (m *StorageCreateUptoboxStorageRequest) Validate(formats strfmt.Registry) e return nil } -func (m *StorageCreateUptoboxStorageRequest) validateClientConfig(formats strfmt.Registry) error { +func (m *StorageCreateS3ExabaStorageRequest) validateClientConfig(formats strfmt.Registry) error { if swag.IsZero(m.ClientConfig) { // not required return nil } @@ -62,7 +62,7 @@ func (m *StorageCreateUptoboxStorageRequest) validateClientConfig(formats strfmt return nil } -func (m *StorageCreateUptoboxStorageRequest) validateConfig(formats strfmt.Registry) error { +func (m *StorageCreateS3ExabaStorageRequest) 
validateConfig(formats strfmt.Registry) error { if swag.IsZero(m.Config) { // not required return nil } @@ -70,8 +70,8 @@ func (m *StorageCreateUptoboxStorageRequest) validateConfig(formats strfmt.Regis return nil } -// ContextValidate validate this storage create uptobox storage request based on the context it is used -func (m *StorageCreateUptoboxStorageRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { +// ContextValidate validate this storage create s3 exaba storage request based on the context it is used +func (m *StorageCreateS3ExabaStorageRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { var res []error if err := m.contextValidateClientConfig(ctx, formats); err != nil { @@ -88,18 +88,18 @@ func (m *StorageCreateUptoboxStorageRequest) ContextValidate(ctx context.Context return nil } -func (m *StorageCreateUptoboxStorageRequest) contextValidateClientConfig(ctx context.Context, formats strfmt.Registry) error { +func (m *StorageCreateS3ExabaStorageRequest) contextValidateClientConfig(ctx context.Context, formats strfmt.Registry) error { return nil } -func (m *StorageCreateUptoboxStorageRequest) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error { +func (m *StorageCreateS3ExabaStorageRequest) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error { return nil } // MarshalBinary interface implementation -func (m *StorageCreateUptoboxStorageRequest) MarshalBinary() ([]byte, error) { +func (m *StorageCreateS3ExabaStorageRequest) MarshalBinary() ([]byte, error) { if m == nil { return nil, nil } @@ -107,8 +107,8 @@ func (m *StorageCreateUptoboxStorageRequest) MarshalBinary() ([]byte, error) { } // UnmarshalBinary interface implementation -func (m *StorageCreateUptoboxStorageRequest) UnmarshalBinary(b []byte) error { - var res StorageCreateUptoboxStorageRequest +func (m *StorageCreateS3ExabaStorageRequest) UnmarshalBinary(b []byte) error { + var res 
StorageCreateS3ExabaStorageRequest if err := swag.ReadJSON(b, &res); err != nil { return err } diff --git a/client/swagger/models/storage_create_s3_file_lu_storage_request.go b/client/swagger/models/storage_create_s3_file_lu_storage_request.go new file mode 100644 index 000000000..04760fc8f --- /dev/null +++ b/client/swagger/models/storage_create_s3_file_lu_storage_request.go @@ -0,0 +1,117 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageCreateS3FileLuStorageRequest storage create s3 file lu storage request +// +// swagger:model storage.createS3FileLuStorageRequest +type StorageCreateS3FileLuStorageRequest struct { + + // config for underlying HTTP client + ClientConfig struct { + ModelClientConfig + } `json:"clientConfig,omitempty"` + + // config for the storage + Config struct { + StorageS3FileLuConfig + } `json:"config,omitempty"` + + // Name of the storage, must be unique + // Example: my-storage + Name string `json:"name,omitempty"` + + // Path of the storage + Path string `json:"path,omitempty"` +} + +// Validate validates this storage create s3 file lu storage request +func (m *StorageCreateS3FileLuStorageRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateClientConfig(formats); err != nil { + res = append(res, err) + } + + if err := m.validateConfig(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *StorageCreateS3FileLuStorageRequest) validateClientConfig(formats strfmt.Registry) error { + if swag.IsZero(m.ClientConfig) { // not required + return nil + } + + return nil +} + +func (m *StorageCreateS3FileLuStorageRequest) validateConfig(formats strfmt.Registry) error { + if swag.IsZero(m.Config) { // not required + return nil + } + + return nil +} + +// ContextValidate validate this storage create s3 file lu storage request based on the context it is used +func (m *StorageCreateS3FileLuStorageRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateClientConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *StorageCreateS3FileLuStorageRequest) contextValidateClientConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +func (m *StorageCreateS3FileLuStorageRequest) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +// MarshalBinary interface implementation +func (m *StorageCreateS3FileLuStorageRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageCreateS3FileLuStorageRequest) UnmarshalBinary(b []byte) error { + var res StorageCreateS3FileLuStorageRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_create_s3_flash_blade_storage_request.go b/client/swagger/models/storage_create_s3_flash_blade_storage_request.go new file mode 100644 index 000000000..9ba862b2e --- /dev/null +++ b/client/swagger/models/storage_create_s3_flash_blade_storage_request.go @@ -0,0 +1,117 @@ +// Code 
generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageCreateS3FlashBladeStorageRequest storage create s3 flash blade storage request +// +// swagger:model storage.createS3FlashBladeStorageRequest +type StorageCreateS3FlashBladeStorageRequest struct { + + // config for underlying HTTP client + ClientConfig struct { + ModelClientConfig + } `json:"clientConfig,omitempty"` + + // config for the storage + Config struct { + StorageS3FlashBladeConfig + } `json:"config,omitempty"` + + // Name of the storage, must be unique + // Example: my-storage + Name string `json:"name,omitempty"` + + // Path of the storage + Path string `json:"path,omitempty"` +} + +// Validate validates this storage create s3 flash blade storage request +func (m *StorageCreateS3FlashBladeStorageRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateClientConfig(formats); err != nil { + res = append(res, err) + } + + if err := m.validateConfig(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *StorageCreateS3FlashBladeStorageRequest) validateClientConfig(formats strfmt.Registry) error { + if swag.IsZero(m.ClientConfig) { // not required + return nil + } + + return nil +} + +func (m *StorageCreateS3FlashBladeStorageRequest) validateConfig(formats strfmt.Registry) error { + if swag.IsZero(m.Config) { // not required + return nil + } + + return nil +} + +// ContextValidate validate this storage create s3 flash blade storage request based on the context it is used +func (m *StorageCreateS3FlashBladeStorageRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateClientConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *StorageCreateS3FlashBladeStorageRequest) contextValidateClientConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +func (m *StorageCreateS3FlashBladeStorageRequest) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +// MarshalBinary interface implementation +func (m *StorageCreateS3FlashBladeStorageRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageCreateS3FlashBladeStorageRequest) UnmarshalBinary(b []byte) error { + var res StorageCreateS3FlashBladeStorageRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_create_s3_hetzner_storage_request.go b/client/swagger/models/storage_create_s3_hetzner_storage_request.go new file mode 100644 index 000000000..af91ebfae --- /dev/null +++ b/client/swagger/models/storage_create_s3_hetzner_storage_request.go @@ -0,0 
+1,117 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageCreateS3HetznerStorageRequest storage create s3 hetzner storage request +// +// swagger:model storage.createS3HetznerStorageRequest +type StorageCreateS3HetznerStorageRequest struct { + + // config for underlying HTTP client + ClientConfig struct { + ModelClientConfig + } `json:"clientConfig,omitempty"` + + // config for the storage + Config struct { + StorageS3HetznerConfig + } `json:"config,omitempty"` + + // Name of the storage, must be unique + // Example: my-storage + Name string `json:"name,omitempty"` + + // Path of the storage + Path string `json:"path,omitempty"` +} + +// Validate validates this storage create s3 hetzner storage request +func (m *StorageCreateS3HetznerStorageRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateClientConfig(formats); err != nil { + res = append(res, err) + } + + if err := m.validateConfig(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *StorageCreateS3HetznerStorageRequest) validateClientConfig(formats strfmt.Registry) error { + if swag.IsZero(m.ClientConfig) { // not required + return nil + } + + return nil +} + +func (m *StorageCreateS3HetznerStorageRequest) validateConfig(formats strfmt.Registry) error { + if swag.IsZero(m.Config) { // not required + return nil + } + + return nil +} + +// ContextValidate validate this storage create s3 hetzner storage request based on the context it is used +func (m *StorageCreateS3HetznerStorageRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateClientConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *StorageCreateS3HetznerStorageRequest) contextValidateClientConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +func (m *StorageCreateS3HetznerStorageRequest) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +// MarshalBinary interface implementation +func (m *StorageCreateS3HetznerStorageRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageCreateS3HetznerStorageRequest) UnmarshalBinary(b []byte) error { + var res StorageCreateS3HetznerStorageRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_create_s3_intercolo_storage_request.go b/client/swagger/models/storage_create_s3_intercolo_storage_request.go new file mode 100644 index 000000000..d5dfba34d --- /dev/null +++ b/client/swagger/models/storage_create_s3_intercolo_storage_request.go @@ -0,0 +1,117 @@ +// Code 
generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageCreateS3IntercoloStorageRequest storage create s3 intercolo storage request +// +// swagger:model storage.createS3IntercoloStorageRequest +type StorageCreateS3IntercoloStorageRequest struct { + + // config for underlying HTTP client + ClientConfig struct { + ModelClientConfig + } `json:"clientConfig,omitempty"` + + // config for the storage + Config struct { + StorageS3IntercoloConfig + } `json:"config,omitempty"` + + // Name of the storage, must be unique + // Example: my-storage + Name string `json:"name,omitempty"` + + // Path of the storage + Path string `json:"path,omitempty"` +} + +// Validate validates this storage create s3 intercolo storage request +func (m *StorageCreateS3IntercoloStorageRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateClientConfig(formats); err != nil { + res = append(res, err) + } + + if err := m.validateConfig(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *StorageCreateS3IntercoloStorageRequest) validateClientConfig(formats strfmt.Registry) error { + if swag.IsZero(m.ClientConfig) { // not required + return nil + } + + return nil +} + +func (m *StorageCreateS3IntercoloStorageRequest) validateConfig(formats strfmt.Registry) error { + if swag.IsZero(m.Config) { // not required + return nil + } + + return nil +} + +// ContextValidate validate this storage create s3 intercolo storage request based on the context it is used +func (m *StorageCreateS3IntercoloStorageRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateClientConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *StorageCreateS3IntercoloStorageRequest) contextValidateClientConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +func (m *StorageCreateS3IntercoloStorageRequest) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +// MarshalBinary interface implementation +func (m *StorageCreateS3IntercoloStorageRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageCreateS3IntercoloStorageRequest) UnmarshalBinary(b []byte) error { + var res StorageCreateS3IntercoloStorageRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_create_s3_mega_storage_request.go b/client/swagger/models/storage_create_s3_mega_storage_request.go new file mode 100644 index 000000000..5ddb87eef --- /dev/null +++ b/client/swagger/models/storage_create_s3_mega_storage_request.go @@ -0,0 +1,117 @@ +// Code 
generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageCreateS3MegaStorageRequest storage create s3 mega storage request +// +// swagger:model storage.createS3MegaStorageRequest +type StorageCreateS3MegaStorageRequest struct { + + // config for underlying HTTP client + ClientConfig struct { + ModelClientConfig + } `json:"clientConfig,omitempty"` + + // config for the storage + Config struct { + StorageS3MegaConfig + } `json:"config,omitempty"` + + // Name of the storage, must be unique + // Example: my-storage + Name string `json:"name,omitempty"` + + // Path of the storage + Path string `json:"path,omitempty"` +} + +// Validate validates this storage create s3 mega storage request +func (m *StorageCreateS3MegaStorageRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateClientConfig(formats); err != nil { + res = append(res, err) + } + + if err := m.validateConfig(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *StorageCreateS3MegaStorageRequest) validateClientConfig(formats strfmt.Registry) error { + if swag.IsZero(m.ClientConfig) { // not required + return nil + } + + return nil +} + +func (m *StorageCreateS3MegaStorageRequest) validateConfig(formats strfmt.Registry) error { + if swag.IsZero(m.Config) { // not required + return nil + } + + return nil +} + +// ContextValidate validate this storage create s3 mega storage request based on the context it is used +func (m *StorageCreateS3MegaStorageRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateClientConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *StorageCreateS3MegaStorageRequest) contextValidateClientConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +func (m *StorageCreateS3MegaStorageRequest) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +// MarshalBinary interface implementation +func (m *StorageCreateS3MegaStorageRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageCreateS3MegaStorageRequest) UnmarshalBinary(b []byte) error { + var res StorageCreateS3MegaStorageRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_create_s3_o_v_hcloud_storage_request.go b/client/swagger/models/storage_create_s3_o_v_hcloud_storage_request.go new file mode 100644 index 000000000..370e9dd4b --- /dev/null +++ b/client/swagger/models/storage_create_s3_o_v_hcloud_storage_request.go @@ -0,0 +1,117 @@ +// Code generated by go-swagger; DO 
NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageCreateS3OVHcloudStorageRequest storage create s3 o v hcloud storage request +// +// swagger:model storage.createS3OVHcloudStorageRequest +type StorageCreateS3OVHcloudStorageRequest struct { + + // config for underlying HTTP client + ClientConfig struct { + ModelClientConfig + } `json:"clientConfig,omitempty"` + + // config for the storage + Config struct { + StorageS3OVHcloudConfig + } `json:"config,omitempty"` + + // Name of the storage, must be unique + // Example: my-storage + Name string `json:"name,omitempty"` + + // Path of the storage + Path string `json:"path,omitempty"` +} + +// Validate validates this storage create s3 o v hcloud storage request +func (m *StorageCreateS3OVHcloudStorageRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateClientConfig(formats); err != nil { + res = append(res, err) + } + + if err := m.validateConfig(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *StorageCreateS3OVHcloudStorageRequest) validateClientConfig(formats strfmt.Registry) error { + if swag.IsZero(m.ClientConfig) { // not required + return nil + } + + return nil +} + +func (m *StorageCreateS3OVHcloudStorageRequest) validateConfig(formats strfmt.Registry) error { + if swag.IsZero(m.Config) { // not required + return nil + } + + return nil +} + +// ContextValidate validate this storage create s3 o v hcloud storage request based on the context it is used +func (m *StorageCreateS3OVHcloudStorageRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateClientConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *StorageCreateS3OVHcloudStorageRequest) contextValidateClientConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +func (m *StorageCreateS3OVHcloudStorageRequest) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +// MarshalBinary interface implementation +func (m *StorageCreateS3OVHcloudStorageRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageCreateS3OVHcloudStorageRequest) UnmarshalBinary(b []byte) error { + var res StorageCreateS3OVHcloudStorageRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_create_s3_outscale_storage_request.go b/client/swagger/models/storage_create_s3_outscale_storage_request.go new file mode 100644 index 000000000..76006f50c --- /dev/null +++ b/client/swagger/models/storage_create_s3_outscale_storage_request.go @@ -0,0 +1,117 @@ +// 
Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageCreateS3OutscaleStorageRequest storage create s3 outscale storage request +// +// swagger:model storage.createS3OutscaleStorageRequest +type StorageCreateS3OutscaleStorageRequest struct { + + // config for underlying HTTP client + ClientConfig struct { + ModelClientConfig + } `json:"clientConfig,omitempty"` + + // config for the storage + Config struct { + StorageS3OutscaleConfig + } `json:"config,omitempty"` + + // Name of the storage, must be unique + // Example: my-storage + Name string `json:"name,omitempty"` + + // Path of the storage + Path string `json:"path,omitempty"` +} + +// Validate validates this storage create s3 outscale storage request +func (m *StorageCreateS3OutscaleStorageRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateClientConfig(formats); err != nil { + res = append(res, err) + } + + if err := m.validateConfig(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *StorageCreateS3OutscaleStorageRequest) validateClientConfig(formats strfmt.Registry) error { + if swag.IsZero(m.ClientConfig) { // not required + return nil + } + + return nil +} + +func (m *StorageCreateS3OutscaleStorageRequest) validateConfig(formats strfmt.Registry) error { + if swag.IsZero(m.Config) { // not required + return nil + } + + return nil +} + +// ContextValidate validate this storage create s3 outscale storage request based on the context it is used +func (m *StorageCreateS3OutscaleStorageRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateClientConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *StorageCreateS3OutscaleStorageRequest) contextValidateClientConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +func (m *StorageCreateS3OutscaleStorageRequest) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +// MarshalBinary interface implementation +func (m *StorageCreateS3OutscaleStorageRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageCreateS3OutscaleStorageRequest) UnmarshalBinary(b []byte) error { + var res StorageCreateS3OutscaleStorageRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_create_s3_rabata_storage_request.go b/client/swagger/models/storage_create_s3_rabata_storage_request.go new file mode 100644 index 000000000..32a5d47eb --- /dev/null +++ b/client/swagger/models/storage_create_s3_rabata_storage_request.go @@ -0,0 +1,117 @@ +// Code 
generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageCreateS3RabataStorageRequest storage create s3 rabata storage request +// +// swagger:model storage.createS3RabataStorageRequest +type StorageCreateS3RabataStorageRequest struct { + + // config for underlying HTTP client + ClientConfig struct { + ModelClientConfig + } `json:"clientConfig,omitempty"` + + // config for the storage + Config struct { + StorageS3RabataConfig + } `json:"config,omitempty"` + + // Name of the storage, must be unique + // Example: my-storage + Name string `json:"name,omitempty"` + + // Path of the storage + Path string `json:"path,omitempty"` +} + +// Validate validates this storage create s3 rabata storage request +func (m *StorageCreateS3RabataStorageRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateClientConfig(formats); err != nil { + res = append(res, err) + } + + if err := m.validateConfig(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *StorageCreateS3RabataStorageRequest) validateClientConfig(formats strfmt.Registry) error { + if swag.IsZero(m.ClientConfig) { // not required + return nil + } + + return nil +} + +func (m *StorageCreateS3RabataStorageRequest) validateConfig(formats strfmt.Registry) error { + if swag.IsZero(m.Config) { // not required + return nil + } + + return nil +} + +// ContextValidate validate this storage create s3 rabata storage request based on the context it is used +func (m *StorageCreateS3RabataStorageRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateClientConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *StorageCreateS3RabataStorageRequest) contextValidateClientConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +func (m *StorageCreateS3RabataStorageRequest) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +// MarshalBinary interface implementation +func (m *StorageCreateS3RabataStorageRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageCreateS3RabataStorageRequest) UnmarshalBinary(b []byte) error { + var res StorageCreateS3RabataStorageRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_create_s3_selectel_storage_request.go b/client/swagger/models/storage_create_s3_selectel_storage_request.go new file mode 100644 index 000000000..d7f6ce124 --- /dev/null +++ b/client/swagger/models/storage_create_s3_selectel_storage_request.go @@ -0,0 +1,117 @@ +// Code generated by 
go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageCreateS3SelectelStorageRequest storage create s3 selectel storage request +// +// swagger:model storage.createS3SelectelStorageRequest +type StorageCreateS3SelectelStorageRequest struct { + + // config for underlying HTTP client + ClientConfig struct { + ModelClientConfig + } `json:"clientConfig,omitempty"` + + // config for the storage + Config struct { + StorageS3SelectelConfig + } `json:"config,omitempty"` + + // Name of the storage, must be unique + // Example: my-storage + Name string `json:"name,omitempty"` + + // Path of the storage + Path string `json:"path,omitempty"` +} + +// Validate validates this storage create s3 selectel storage request +func (m *StorageCreateS3SelectelStorageRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateClientConfig(formats); err != nil { + res = append(res, err) + } + + if err := m.validateConfig(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *StorageCreateS3SelectelStorageRequest) validateClientConfig(formats strfmt.Registry) error { + if swag.IsZero(m.ClientConfig) { // not required + return nil + } + + return nil +} + +func (m *StorageCreateS3SelectelStorageRequest) validateConfig(formats strfmt.Registry) error { + if swag.IsZero(m.Config) { // not required + return nil + } + + return nil +} + +// ContextValidate validate this storage create s3 selectel storage request based on the context it is used +func (m *StorageCreateS3SelectelStorageRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateClientConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *StorageCreateS3SelectelStorageRequest) contextValidateClientConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +func (m *StorageCreateS3SelectelStorageRequest) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +// MarshalBinary interface implementation +func (m *StorageCreateS3SelectelStorageRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageCreateS3SelectelStorageRequest) UnmarshalBinary(b []byte) error { + var res StorageCreateS3SelectelStorageRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_create_s3_servercore_storage_request.go b/client/swagger/models/storage_create_s3_servercore_storage_request.go new file mode 100644 index 000000000..ea1cc36fd --- /dev/null +++ b/client/swagger/models/storage_create_s3_servercore_storage_request.go @@ -0,0 +1,117 @@ 
+// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageCreateS3ServercoreStorageRequest storage create s3 servercore storage request +// +// swagger:model storage.createS3ServercoreStorageRequest +type StorageCreateS3ServercoreStorageRequest struct { + + // config for underlying HTTP client + ClientConfig struct { + ModelClientConfig + } `json:"clientConfig,omitempty"` + + // config for the storage + Config struct { + StorageS3ServercoreConfig + } `json:"config,omitempty"` + + // Name of the storage, must be unique + // Example: my-storage + Name string `json:"name,omitempty"` + + // Path of the storage + Path string `json:"path,omitempty"` +} + +// Validate validates this storage create s3 servercore storage request +func (m *StorageCreateS3ServercoreStorageRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateClientConfig(formats); err != nil { + res = append(res, err) + } + + if err := m.validateConfig(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *StorageCreateS3ServercoreStorageRequest) validateClientConfig(formats strfmt.Registry) error { + if swag.IsZero(m.ClientConfig) { // not required + return nil + } + + return nil +} + +func (m *StorageCreateS3ServercoreStorageRequest) validateConfig(formats strfmt.Registry) error { + if swag.IsZero(m.Config) { // not required + return nil + } + + return nil +} + +// ContextValidate validate this storage create s3 servercore storage request based on the context it is used +func (m *StorageCreateS3ServercoreStorageRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateClientConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *StorageCreateS3ServercoreStorageRequest) contextValidateClientConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +func (m *StorageCreateS3ServercoreStorageRequest) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +// MarshalBinary interface implementation +func (m *StorageCreateS3ServercoreStorageRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageCreateS3ServercoreStorageRequest) UnmarshalBinary(b []byte) error { + var res StorageCreateS3ServercoreStorageRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_create_s3_spectra_logic_storage_request.go b/client/swagger/models/storage_create_s3_spectra_logic_storage_request.go new file mode 100644 index 000000000..4b447c814 --- /dev/null +++ 
b/client/swagger/models/storage_create_s3_spectra_logic_storage_request.go @@ -0,0 +1,117 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageCreateS3SpectraLogicStorageRequest storage create s3 spectra logic storage request +// +// swagger:model storage.createS3SpectraLogicStorageRequest +type StorageCreateS3SpectraLogicStorageRequest struct { + + // config for underlying HTTP client + ClientConfig struct { + ModelClientConfig + } `json:"clientConfig,omitempty"` + + // config for the storage + Config struct { + StorageS3SpectraLogicConfig + } `json:"config,omitempty"` + + // Name of the storage, must be unique + // Example: my-storage + Name string `json:"name,omitempty"` + + // Path of the storage + Path string `json:"path,omitempty"` +} + +// Validate validates this storage create s3 spectra logic storage request +func (m *StorageCreateS3SpectraLogicStorageRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateClientConfig(formats); err != nil { + res = append(res, err) + } + + if err := m.validateConfig(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *StorageCreateS3SpectraLogicStorageRequest) validateClientConfig(formats strfmt.Registry) error { + if swag.IsZero(m.ClientConfig) { // not required + return nil + } + + return nil +} + +func (m *StorageCreateS3SpectraLogicStorageRequest) validateConfig(formats strfmt.Registry) error { + if swag.IsZero(m.Config) { // not required + return nil + } + + return nil +} + +// ContextValidate validate this storage create s3 spectra logic storage request based on the context it is used +func (m *StorageCreateS3SpectraLogicStorageRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateClientConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *StorageCreateS3SpectraLogicStorageRequest) contextValidateClientConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +func (m *StorageCreateS3SpectraLogicStorageRequest) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +// MarshalBinary interface implementation +func (m *StorageCreateS3SpectraLogicStorageRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageCreateS3SpectraLogicStorageRequest) UnmarshalBinary(b []byte) error { + var res StorageCreateS3SpectraLogicStorageRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_create_s3_zata_storage_request.go b/client/swagger/models/storage_create_s3_zata_storage_request.go new file mode 100644 index 000000000..00ad94a16 --- /dev/null +++ b/client/swagger/models/storage_create_s3_zata_storage_request.go 
@@ -0,0 +1,117 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageCreateS3ZataStorageRequest storage create s3 zata storage request +// +// swagger:model storage.createS3ZataStorageRequest +type StorageCreateS3ZataStorageRequest struct { + + // config for underlying HTTP client + ClientConfig struct { + ModelClientConfig + } `json:"clientConfig,omitempty"` + + // config for the storage + Config struct { + StorageS3ZataConfig + } `json:"config,omitempty"` + + // Name of the storage, must be unique + // Example: my-storage + Name string `json:"name,omitempty"` + + // Path of the storage + Path string `json:"path,omitempty"` +} + +// Validate validates this storage create s3 zata storage request +func (m *StorageCreateS3ZataStorageRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateClientConfig(formats); err != nil { + res = append(res, err) + } + + if err := m.validateConfig(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *StorageCreateS3ZataStorageRequest) validateClientConfig(formats strfmt.Registry) error { + if swag.IsZero(m.ClientConfig) { // not required + return nil + } + + return nil +} + +func (m *StorageCreateS3ZataStorageRequest) validateConfig(formats strfmt.Registry) error { + if swag.IsZero(m.Config) { // not required + return nil + } + + return nil +} + +// ContextValidate validate this storage create s3 zata storage request based on the context it is used +func (m *StorageCreateS3ZataStorageRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateClientConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *StorageCreateS3ZataStorageRequest) contextValidateClientConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +func (m *StorageCreateS3ZataStorageRequest) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +// MarshalBinary interface implementation +func (m *StorageCreateS3ZataStorageRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageCreateS3ZataStorageRequest) UnmarshalBinary(b []byte) error { + var res StorageCreateS3ZataStorageRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_drive_config.go b/client/swagger/models/storage_drive_config.go index a7c9ad1f5..7e5f81076 100644 --- a/client/swagger/models/storage_drive_config.go +++ b/client/swagger/models/storage_drive_config.go @@ -35,6 +35,9 @@ type StorageDriveConfig struct { // Upload chunk size. 
ChunkSize *string `json:"chunkSize,omitempty"` + // Use client credentials OAuth flow. + ClientCredentials *bool `json:"clientCredentials,omitempty"` + // Google Application Client Id ClientID string `json:"clientId,omitempty"` @@ -78,6 +81,9 @@ type StorageDriveConfig struct { // Size of listing chunk 100-1000, 0 to disable. ListChunk *int64 `json:"listChunk,omitempty"` + // Whether the request should enforce expansive access rules. + MetadataEnforceExpansiveAccess *bool `json:"metadataEnforceExpansiveAccess,omitempty"` + // Control whether labels should be read or written in metadata. // Example: off MetadataLabels *string `json:"metadataLabels,omitempty"` diff --git a/client/swagger/models/storage_dropbox_config.go b/client/swagger/models/storage_dropbox_config.go index 637ec7dcb..982e4e403 100644 --- a/client/swagger/models/storage_dropbox_config.go +++ b/client/swagger/models/storage_dropbox_config.go @@ -20,7 +20,7 @@ type StorageDropboxConfig struct { // Auth server URL. AuthURL string `json:"authUrl,omitempty"` - // Max time to wait for a batch to finish committing + // Max time to wait for a batch to finish committing. (no longer used) BatchCommitTimeout *string `json:"batchCommitTimeout,omitempty"` // Upload file batching sync|async|off. @@ -35,6 +35,9 @@ type StorageDropboxConfig struct { // Upload chunk size (< 150Mi). ChunkSize *string `json:"chunkSize,omitempty"` + // Use client credentials OAuth flow. + ClientCredentials *bool `json:"clientCredentials,omitempty"` + // OAuth Client Id. ClientID string `json:"clientId,omitempty"` @@ -47,6 +50,9 @@ type StorageDropboxConfig struct { // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` + // Comma separated list of preferred formats for exporting files + ExportFormats *string `json:"exportFormats,omitempty"` + // Impersonate this user when using a business account. 
Impersonate string `json:"impersonate,omitempty"` @@ -62,6 +68,12 @@ type StorageDropboxConfig struct { // Instructs rclone to work on shared folders. SharedFolders *bool `json:"sharedFolders,omitempty"` + // Show all exportable files in listings. + ShowAllExports *bool `json:"showAllExports,omitempty"` + + // Skip exportable files in all listings. + SkipExports *bool `json:"skipExports,omitempty"` + // OAuth Access Token as a JSON blob. Token string `json:"token,omitempty"` diff --git a/client/swagger/models/storage_ftp_config.go b/client/swagger/models/storage_ftp_config.go index a5de21b28..eec524ad0 100644 --- a/client/swagger/models/storage_ftp_config.go +++ b/client/swagger/models/storage_ftp_config.go @@ -17,6 +17,9 @@ import ( // swagger:model storage.ftpConfig type StorageFtpConfig struct { + // Allow insecure TLS ciphers + AllowInsecureTLSCiphers *bool `json:"allowInsecureTlsCiphers,omitempty"` + // Allow asking for FTP password when needed. AskPassword *bool `json:"askPassword,omitempty"` @@ -54,12 +57,18 @@ type StorageFtpConfig struct { // FTP host to connect to. Host string `json:"host,omitempty"` + // URL for HTTP CONNECT proxy + HTTPProxy string `json:"httpProxy,omitempty"` + // Max time before closing idle connections. IdleTimeout *string `json:"idleTimeout,omitempty"` // Do not verify the TLS certificate of the server. NoCheckCertificate *bool `json:"noCheckCertificate,omitempty"` + // Don't check the upload is OK + NoCheckUpload *bool `json:"noCheckUpload,omitempty"` + // FTP password. Pass string `json:"pass,omitempty"` diff --git a/client/swagger/models/storage_gcs_config.go b/client/swagger/models/storage_gcs_config.go index 3dfafd062..d7fe605ef 100644 --- a/client/swagger/models/storage_gcs_config.go +++ b/client/swagger/models/storage_gcs_config.go @@ -17,6 +17,9 @@ import ( // swagger:model storage.gcsConfig type StorageGcsConfig struct { + // Short-lived access token. 
+ AccessToken string `json:"accessToken,omitempty"` + // Access public buckets and objects without credentials. Anonymous *bool `json:"anonymous,omitempty"` @@ -30,6 +33,9 @@ type StorageGcsConfig struct { // Access checks should use bucket-level IAM policies. BucketPolicyOnly *bool `json:"bucketPolicyOnly,omitempty"` + // Use client credentials OAuth flow. + ClientCredentials *bool `json:"clientCredentials,omitempty"` + // OAuth Client Id. ClientID string `json:"clientId,omitempty"` @@ -48,7 +54,8 @@ type StorageGcsConfig struct { // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` - // Endpoint for the service. + // Custom endpoint for the storage API. Leave blank to use the provider default. + // Example: storage.example.org Endpoint string `json:"endpoint,omitempty"` // Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars). diff --git a/client/swagger/models/storage_gphotos_config.go b/client/swagger/models/storage_gphotos_config.go index b1a85bbda..2aa5fe387 100644 --- a/client/swagger/models/storage_gphotos_config.go +++ b/client/swagger/models/storage_gphotos_config.go @@ -20,7 +20,7 @@ type StorageGphotosConfig struct { // Auth server URL. AuthURL string `json:"authUrl,omitempty"` - // Max time to wait for a batch to finish committing + // Max time to wait for a batch to finish committing. (no longer used) BatchCommitTimeout *string `json:"batchCommitTimeout,omitempty"` // Upload file batching sync|async|off. @@ -32,6 +32,9 @@ type StorageGphotosConfig struct { // Max time to allow an idle upload batch before uploading. BatchTimeout *string `json:"batchTimeout,omitempty"` + // Use client credentials OAuth flow. + ClientCredentials *bool `json:"clientCredentials,omitempty"` + // OAuth Client Id. ClientID string `json:"clientId,omitempty"` @@ -47,6 +50,9 @@ type StorageGphotosConfig struct { // Also view and download archived media. 
IncludeArchived *bool `json:"includeArchived,omitempty"` + // Use the gphotosdl proxy for downloading the full resolution images + Proxy string `json:"proxy,omitempty"` + // Set to make the Google Photos backend read only. ReadOnly *bool `json:"readOnly,omitempty"` diff --git a/client/swagger/models/storage_hidrive_config.go b/client/swagger/models/storage_hidrive_config.go index 3d9a81573..934bcdb88 100644 --- a/client/swagger/models/storage_hidrive_config.go +++ b/client/swagger/models/storage_hidrive_config.go @@ -23,6 +23,9 @@ type StorageHidriveConfig struct { // Chunksize for chunked uploads. ChunkSize *string `json:"chunkSize,omitempty"` + // Use client credentials OAuth flow. + ClientCredentials *bool `json:"clientCredentials,omitempty"` + // OAuth Client Id. ClientID string `json:"clientId,omitempty"` diff --git a/client/swagger/models/storage_internetarchive_config.go b/client/swagger/models/storage_internetarchive_config.go deleted file mode 100644 index 38db46d91..000000000 --- a/client/swagger/models/storage_internetarchive_config.go +++ /dev/null @@ -1,71 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" -) - -// StorageInternetarchiveConfig storage internetarchive config -// -// swagger:model storage.internetarchiveConfig -type StorageInternetarchiveConfig struct { - - // IAS3 Access Key. - AccessKeyID string `json:"accessKeyId,omitempty"` - - // Description of the remote. - Description string `json:"description,omitempty"` - - // Don't ask the server to test against MD5 checksum calculated by rclone. - DisableChecksum *bool `json:"disableChecksum,omitempty"` - - // The encoding for the backend. - Encoding *string `json:"encoding,omitempty"` - - // IAS3 Endpoint. 
- Endpoint *string `json:"endpoint,omitempty"` - - // Host of InternetArchive Frontend. - FrontEndpoint *string `json:"frontEndpoint,omitempty"` - - // IAS3 Secret Key (password). - SecretAccessKey string `json:"secretAccessKey,omitempty"` - - // Timeout for waiting the server's processing tasks (specifically archive and book_op) to finish. - WaitArchive *string `json:"waitArchive,omitempty"` -} - -// Validate validates this storage internetarchive config -func (m *StorageInternetarchiveConfig) Validate(formats strfmt.Registry) error { - return nil -} - -// ContextValidate validates this storage internetarchive config based on context it is used -func (m *StorageInternetarchiveConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *StorageInternetarchiveConfig) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *StorageInternetarchiveConfig) UnmarshalBinary(b []byte) error { - var res StorageInternetarchiveConfig - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/client/swagger/models/storage_jottacloud_config.go b/client/swagger/models/storage_jottacloud_config.go index 4ecc2644b..e3c6b075d 100644 --- a/client/swagger/models/storage_jottacloud_config.go +++ b/client/swagger/models/storage_jottacloud_config.go @@ -20,6 +20,9 @@ type StorageJottacloudConfig struct { // Auth server URL. AuthURL string `json:"authUrl,omitempty"` + // Use client credentials OAuth flow. + ClientCredentials *bool `json:"clientCredentials,omitempty"` + // OAuth Client Id. 
ClientID string `json:"clientId,omitempty"` diff --git a/client/swagger/models/storage_local_config.go b/client/swagger/models/storage_local_config.go index 722e8213d..54f4224b8 100644 --- a/client/swagger/models/storage_local_config.go +++ b/client/swagger/models/storage_local_config.go @@ -32,7 +32,10 @@ type StorageLocalConfig struct { // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` - // Translate symlinks to/from regular files with a '.rclonelink' extension. + // Comma separated list of supported checksum types. + Hashes string `json:"hashes,omitempty"` + + // Translate symlinks to/from regular files with a '.rclonelink' extension for the local backend. Links *bool `json:"links,omitempty"` // Don't check to see if the files change during upload. @@ -60,6 +63,9 @@ type StorageLocalConfig struct { // Don't warn about skipped symlinks. SkipLinks *bool `json:"skipLinks,omitempty"` + // Don't warn about skipped pipes, sockets and device objects. + SkipSpecials *bool `json:"skipSpecials,omitempty"` + // Set what kind of time is returned. // Example: mtime TimeType *string `json:"timeType,omitempty"` diff --git a/client/swagger/models/storage_mailru_config.go b/client/swagger/models/storage_mailru_config.go index 3db418ca6..438dc6060 100644 --- a/client/swagger/models/storage_mailru_config.go +++ b/client/swagger/models/storage_mailru_config.go @@ -24,6 +24,9 @@ type StorageMailruConfig struct { // Example: true CheckHash *bool `json:"checkHash,omitempty"` + // Use client credentials OAuth flow. + ClientCredentials *bool `json:"clientCredentials,omitempty"` + // OAuth Client Id. 
ClientID string `json:"clientId,omitempty"` diff --git a/client/swagger/models/storage_mega_config.go b/client/swagger/models/storage_mega_config.go index 97d9769d2..f33456fe8 100644 --- a/client/swagger/models/storage_mega_config.go +++ b/client/swagger/models/storage_mega_config.go @@ -17,6 +17,9 @@ import ( // swagger:model storage.megaConfig type StorageMegaConfig struct { + // The 2FA code of your MEGA account if the account is set up with one + Nr2fa string `json:"2fa,omitempty"` + // Output more debug from Mega. Debug *bool `json:"debug,omitempty"` @@ -29,9 +32,15 @@ type StorageMegaConfig struct { // Delete files permanently rather than putting them into the trash. HardDelete *bool `json:"hardDelete,omitempty"` + // Master key (internal use only) + MasterKey string `json:"masterKey,omitempty"` + // Password. Pass string `json:"pass,omitempty"` + // Session (internal use only) + SessionID string `json:"sessionId,omitempty"` + // Use HTTPS for transfers. UseHTTPS *bool `json:"useHttps,omitempty"` diff --git a/client/swagger/models/storage_onedrive_config.go b/client/swagger/models/storage_onedrive_config.go index e2ad854f8..0759c9624 100644 --- a/client/swagger/models/storage_onedrive_config.go +++ b/client/swagger/models/storage_onedrive_config.go @@ -30,6 +30,9 @@ type StorageOnedriveConfig struct { // Chunk size to upload files with - must be multiple of 320k (327,680 bytes). ChunkSize *string `json:"chunkSize,omitempty"` + // Use client credentials OAuth flow. + ClientCredentials *bool `json:"clientCredentials,omitempty"` + // OAuth Client Id. ClientID string `json:"clientId,omitempty"` @@ -95,11 +98,17 @@ type StorageOnedriveConfig struct { // Deprecated: use --server-side-across-configs instead. ServerSideAcrossConfigs *bool `json:"serverSideAcrossConfigs,omitempty"` + // ID of the service principal's tenant. Also called its directory ID. + Tenant string `json:"tenant,omitempty"` + // OAuth Access Token as a JSON blob. 
Token string `json:"token,omitempty"` // Token server url. TokenURL string `json:"tokenUrl,omitempty"` + + // Cutoff for switching to chunked upload. + UploadCutoff *string `json:"uploadCutoff,omitempty"` } // Validate validates this storage onedrive config diff --git a/client/swagger/models/storage_oos_env_auth_config.go b/client/swagger/models/storage_oos_env_auth_config.go index b7457c939..2a52a5d83 100644 --- a/client/swagger/models/storage_oos_env_auth_config.go +++ b/client/swagger/models/storage_oos_env_auth_config.go @@ -23,7 +23,7 @@ type StorageOosEnvAuthConfig struct { // Chunk size to use for uploading. ChunkSize *string `json:"chunkSize,omitempty"` - // Object storage compartment OCID + // Specify compartment OCID, if you need to list buckets. Compartment string `json:"compartment,omitempty"` // Cutoff for switching to multipart copy. diff --git a/client/swagger/models/storage_oos_instance_principal_auth_config.go b/client/swagger/models/storage_oos_instance_principal_auth_config.go index a31d97b07..8cd58d8f7 100644 --- a/client/swagger/models/storage_oos_instance_principal_auth_config.go +++ b/client/swagger/models/storage_oos_instance_principal_auth_config.go @@ -23,7 +23,7 @@ type StorageOosInstancePrincipalAuthConfig struct { // Chunk size to use for uploading. ChunkSize *string `json:"chunkSize,omitempty"` - // Object storage compartment OCID + // Specify compartment OCID, if you need to list buckets. Compartment string `json:"compartment,omitempty"` // Cutoff for switching to multipart copy. diff --git a/client/swagger/models/storage_oos_resource_principal_auth_config.go b/client/swagger/models/storage_oos_resource_principal_auth_config.go index d4ec79a31..16c9826eb 100644 --- a/client/swagger/models/storage_oos_resource_principal_auth_config.go +++ b/client/swagger/models/storage_oos_resource_principal_auth_config.go @@ -23,7 +23,7 @@ type StorageOosResourcePrincipalAuthConfig struct { // Chunk size to use for uploading. 
ChunkSize *string `json:"chunkSize,omitempty"` - // Object storage compartment OCID + // Specify compartment OCID, if you need to list buckets. Compartment string `json:"compartment,omitempty"` // Cutoff for switching to multipart copy. diff --git a/client/swagger/models/storage_oos_user_principal_auth_config.go b/client/swagger/models/storage_oos_user_principal_auth_config.go index 75bd0e158..3ecc0ab1b 100644 --- a/client/swagger/models/storage_oos_user_principal_auth_config.go +++ b/client/swagger/models/storage_oos_user_principal_auth_config.go @@ -23,7 +23,7 @@ type StorageOosUserPrincipalAuthConfig struct { // Chunk size to use for uploading. ChunkSize *string `json:"chunkSize,omitempty"` - // Object storage compartment OCID + // Specify compartment OCID, if you need to list buckets. Compartment string `json:"compartment,omitempty"` // Path to OCI config file diff --git a/client/swagger/models/storage_oos_workload_identity_auth_config.go b/client/swagger/models/storage_oos_workload_identity_auth_config.go index 58a4ba8cb..c708a9b08 100644 --- a/client/swagger/models/storage_oos_workload_identity_auth_config.go +++ b/client/swagger/models/storage_oos_workload_identity_auth_config.go @@ -23,7 +23,7 @@ type StorageOosWorkloadIdentityAuthConfig struct { // Chunk size to use for uploading. ChunkSize *string `json:"chunkSize,omitempty"` - // Object storage compartment OCID + // Specify compartment OCID, if you need to list buckets. Compartment string `json:"compartment,omitempty"` // Cutoff for switching to multipart copy. 
diff --git a/client/swagger/models/storage_opendrive_config.go b/client/swagger/models/storage_opendrive_config.go index 7bb115339..e8f1608ff 100644 --- a/client/swagger/models/storage_opendrive_config.go +++ b/client/swagger/models/storage_opendrive_config.go @@ -17,6 +17,10 @@ import ( // swagger:model storage.opendriveConfig type StorageOpendriveConfig struct { + // Files and folders will be uploaded with this access permission (default private) + // Example: private + Access *string `json:"access,omitempty"` + // Files will be uploaded in chunks this size. ChunkSize *string `json:"chunkSize,omitempty"` diff --git a/client/swagger/models/storage_pcloud_config.go b/client/swagger/models/storage_pcloud_config.go index bf27af88e..7c79a0092 100644 --- a/client/swagger/models/storage_pcloud_config.go +++ b/client/swagger/models/storage_pcloud_config.go @@ -20,6 +20,9 @@ type StoragePcloudConfig struct { // Auth server URL. AuthURL string `json:"authUrl,omitempty"` + // Use client credentials OAuth flow. + ClientCredentials *bool `json:"clientCredentials,omitempty"` + // OAuth Client Id. ClientID string `json:"clientId,omitempty"` diff --git a/client/swagger/models/storage_premiumizeme_config.go b/client/swagger/models/storage_premiumizeme_config.go index cbda1870d..6a25b8cf1 100644 --- a/client/swagger/models/storage_premiumizeme_config.go +++ b/client/swagger/models/storage_premiumizeme_config.go @@ -23,6 +23,9 @@ type StoragePremiumizemeConfig struct { // Auth server URL. AuthURL string `json:"authUrl,omitempty"` + // Use client credentials OAuth flow. + ClientCredentials *bool `json:"clientCredentials,omitempty"` + // OAuth Client Id. 
ClientID string `json:"clientId,omitempty"` diff --git a/client/swagger/models/storage_putio_config.go b/client/swagger/models/storage_putio_config.go index 9c3b6b4b7..8483dc3fb 100644 --- a/client/swagger/models/storage_putio_config.go +++ b/client/swagger/models/storage_putio_config.go @@ -20,6 +20,9 @@ type StoragePutioConfig struct { // Auth server URL. AuthURL string `json:"authUrl,omitempty"` + // Use client credentials OAuth flow. + ClientCredentials *bool `json:"clientCredentials,omitempty"` + // OAuth Client Id. ClientID string `json:"clientId,omitempty"` diff --git a/client/swagger/models/storage_s3_a_w_s_config.go b/client/swagger/models/storage_s3_a_w_s_config.go index 6f14973d5..98683ac2b 100644 --- a/client/swagger/models/storage_s3_a_w_s_config.go +++ b/client/swagger/models/storage_s3_a_w_s_config.go @@ -39,6 +39,9 @@ type StorageS3AWSConfig struct { // Description of the remote. Description string `json:"description,omitempty"` + // Set to use AWS Directory Buckets + DirectoryBucket *bool `json:"directoryBucket,omitempty"` + // Upload an empty object with a trailing slash when a new directory is created DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` @@ -113,6 +116,18 @@ type StorageS3AWSConfig struct { // Enables requester pays option when interacting with S3 bucket. RequesterPays *bool `json:"requesterPays,omitempty"` + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + // Set to debug the SDK SdkLogMode *string `json:"sdkLogMode,omitempty"` @@ -128,6 +143,9 @@ type StorageS3AWSConfig struct { // Path to the shared credentials file. 
SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + // If using SSE-C, the server-side encryption algorithm used when storing this object in S3. SseCustomerAlgorithm string `json:"sseCustomerAlgorithm,omitempty"` @@ -144,6 +162,7 @@ type StorageS3AWSConfig struct { SseKmsKeyID string `json:"sseKmsKeyId,omitempty"` // The storage class to use when storing new objects in S3. + // Example: REDUCED_REDUNDANCY StorageClass string `json:"storageClass,omitempty"` // Endpoint for STS (deprecated). @@ -164,6 +183,12 @@ type StorageS3AWSConfig struct { // Set if rclone should report BucketAlreadyExists errors on bucket creation. UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + // If true use AWS S3 dual-stack endpoint (IPv6 support). UseDualStack *bool `json:"useDualStack,omitempty"` @@ -179,6 +204,9 @@ type StorageS3AWSConfig struct { // Whether to use an unsigned payload in PutObject UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` diff --git a/client/swagger/models/storage_s3_alibaba_config.go b/client/swagger/models/storage_s3_alibaba_config.go index 3851a83b7..c7dfbee4a 100644 --- a/client/swagger/models/storage_s3_alibaba_config.go +++ b/client/swagger/models/storage_s3_alibaba_config.go @@ -54,7 +54,7 @@ type StorageS3AlibabaConfig struct { // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` - // Endpoint for OSS API. 
+ // Endpoint for S3 API. // Example: oss-accelerate.aliyuncs.com Endpoint string `json:"endpoint,omitempty"` @@ -101,6 +101,18 @@ type StorageS3AlibabaConfig struct { // Profile to use in the shared credentials file. Profile string `json:"profile,omitempty"` + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + // Set to debug the SDK SdkLogMode *string `json:"sdkLogMode,omitempty"` @@ -113,7 +125,10 @@ type StorageS3AlibabaConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` - // The storage class to use when storing new objects in OSS. + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + + // The storage class to use when storing new objects in S3. StorageClass string `json:"storageClass,omitempty"` // Concurrency for multipart uploads and copies. @@ -128,6 +143,12 @@ type StorageS3AlibabaConfig struct { // Set if rclone should report BucketAlreadyExists errors on bucket creation. UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + // If true use AWS S3 dual-stack endpoint (IPv6 support). 
UseDualStack *bool `json:"useDualStack,omitempty"` @@ -143,6 +164,9 @@ type StorageS3AlibabaConfig struct { // Whether to use an unsigned payload in PutObject UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` diff --git a/client/swagger/models/storage_s3_arvan_cloud_config.go b/client/swagger/models/storage_s3_arvan_cloud_config.go index 85a9aea1d..8a934d5e4 100644 --- a/client/swagger/models/storage_s3_arvan_cloud_config.go +++ b/client/swagger/models/storage_s3_arvan_cloud_config.go @@ -54,7 +54,7 @@ type StorageS3ArvanCloudConfig struct { // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` - // Endpoint for Arvan Cloud Object Storage (AOS) API. + // Endpoint for S3 API. // Example: s3.ir-thr-at1.arvanstorage.ir Endpoint string `json:"endpoint,omitempty"` @@ -74,7 +74,7 @@ type StorageS3ArvanCloudConfig struct { // Version of ListObjects to use: 1,2 or 0 for auto. ListVersion int64 `json:"listVersion,omitempty"` - // Location constraint - must match endpoint. + // Location constraint - must be set to match the Region. // Example: ir-thr-at1 LocationConstraint string `json:"locationConstraint,omitempty"` @@ -105,6 +105,18 @@ type StorageS3ArvanCloudConfig struct { // Profile to use in the shared credentials file. Profile string `json:"profile,omitempty"` + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. 
+ RoleSessionName string `json:"roleSessionName,omitempty"` + // Set to debug the SDK SdkLogMode *string `json:"sdkLogMode,omitempty"` @@ -117,8 +129,10 @@ type StorageS3ArvanCloudConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` - // The storage class to use when storing new objects in ArvanCloud. - // Example: STANDARD + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + + // The storage class to use when storing new objects in S3. StorageClass string `json:"storageClass,omitempty"` // Concurrency for multipart uploads and copies. @@ -133,6 +147,12 @@ type StorageS3ArvanCloudConfig struct { // Set if rclone should report BucketAlreadyExists errors on bucket creation. UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + // If true use AWS S3 dual-stack endpoint (IPv6 support). UseDualStack *bool `json:"useDualStack,omitempty"` @@ -148,6 +168,9 @@ type StorageS3ArvanCloudConfig struct { // Whether to use an unsigned payload in PutObject UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` diff --git a/client/swagger/models/storage_s3_bizfly_cloud_config.go b/client/swagger/models/storage_s3_bizfly_cloud_config.go new file mode 100644 index 000000000..c6cfa0377 --- /dev/null +++ b/client/swagger/models/storage_s3_bizfly_cloud_config.go @@ -0,0 +1,210 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageS3BizflyCloudConfig storage s3 bizfly cloud config +// +// swagger:model storage.s3BizflyCloudConfig +type StorageS3BizflyCloudConfig struct { + + // AWS Access Key ID. + AccessKeyID string `json:"accessKeyId,omitempty"` + + // Canned ACL used when creating buckets and storing or copying objects. + ACL string `json:"acl,omitempty"` + + // Canned ACL used when creating buckets. + // Example: private + BucketACL string `json:"bucketAcl,omitempty"` + + // Chunk size to use for uploading. + ChunkSize *string `json:"chunkSize,omitempty"` + + // Cutoff for switching to multipart copy. + CopyCutoff *string `json:"copyCutoff,omitempty"` + + // If set this will decompress gzip encoded objects. + Decompress *bool `json:"decompress,omitempty"` + + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + + // Don't store MD5 checksum with object metadata. + DisableChecksum *bool `json:"disableChecksum,omitempty"` + + // Disable usage of http2 for S3 backends. + DisableHttp2 *bool `json:"disableHttp2,omitempty"` + + // Custom endpoint for downloads. + DownloadURL string `json:"downloadUrl,omitempty"` + + // The encoding for the backend. + Encoding *string `json:"encoding,omitempty"` + + // Endpoint for S3 API. + // Example: hn.ss.bfcplatform.vn + Endpoint string `json:"endpoint,omitempty"` + + // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + // Example: false + EnvAuth *bool `json:"envAuth,omitempty"` + + // If true use path style access if false use virtual hosted style. 
+ ForcePathStyle *bool `json:"forcePathStyle,omitempty"` + + // Size of listing chunk (response list for each ListObject S3 request). + ListChunk *int64 `json:"listChunk,omitempty"` + + // Whether to url encode listings: true/false/unset + ListURLEncode *string `json:"listUrlEncode,omitempty"` + + // Version of ListObjects to use: 1,2 or 0 for auto. + ListVersion int64 `json:"listVersion,omitempty"` + + // Maximum number of parts in a multipart upload. + MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` + + // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` + + // Whether to use mmap buffers in internal memory pool. (no longer used) + MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` + + // Set this if the backend might gzip objects. + MightGzip *string `json:"mightGzip,omitempty"` + + // If set, don't attempt to check the bucket exists or create it. + NoCheckBucket *bool `json:"noCheckBucket,omitempty"` + + // If set, don't HEAD uploaded objects to check integrity. + NoHead *bool `json:"noHead,omitempty"` + + // If set, do not do HEAD before GET when getting objects. + NoHeadObject *bool `json:"noHeadObject,omitempty"` + + // Suppress setting and reading of system metadata + NoSystemMetadata *bool `json:"noSystemMetadata,omitempty"` + + // Profile to use in the shared credentials file. + Profile string `json:"profile,omitempty"` + + // Region to connect to. + // Example: hn + Region string `json:"region,omitempty"` + + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. 
+ RoleSessionName string `json:"roleSessionName,omitempty"` + + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + + // AWS Secret Access Key (password). + SecretAccessKey string `json:"secretAccessKey,omitempty"` + + // An AWS session token. + SessionToken string `json:"sessionToken,omitempty"` + + // Path to the shared credentials file. + SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + + // Concurrency for multipart uploads and copies. + UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` + + // Cutoff for switching to chunked upload. + UploadCutoff *string `json:"uploadCutoff,omitempty"` + + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + + // Whether to use ETag in multipart uploads for verification + UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + + // Set if rclone should use multipart uploads. 
+ UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + + // Whether to use a presigned request or PutObject for single part uploads + UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + + // If true use v2 authentication. + V2Auth *bool `json:"v2Auth,omitempty"` + + // Show file versions as they were at the specified time. + VersionAt *string `json:"versionAt,omitempty"` + + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + + // Include old versions in directory listings. + Versions *bool `json:"versions,omitempty"` +} + +// Validate validates this storage s3 bizfly cloud config +func (m *StorageS3BizflyCloudConfig) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this storage s3 bizfly cloud config based on context it is used +func (m *StorageS3BizflyCloudConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *StorageS3BizflyCloudConfig) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageS3BizflyCloudConfig) UnmarshalBinary(b []byte) error { + var res StorageS3BizflyCloudConfig + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_s3_ceph_config.go b/client/swagger/models/storage_s3_ceph_config.go index 748c80a75..569617a63 100644 --- a/client/swagger/models/storage_s3_ceph_config.go +++ b/client/swagger/models/storage_s3_ceph_config.go @@ -106,6 +106,18 @@ type StorageS3CephConfig struct { // Region to connect to. 
Region string `json:"region,omitempty"` + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + // Set to debug the SDK SdkLogMode *string `json:"sdkLogMode,omitempty"` @@ -121,6 +133,9 @@ type StorageS3CephConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + // If using SSE-C, the server-side encryption algorithm used when storing this object in S3. SseCustomerAlgorithm string `json:"sseCustomerAlgorithm,omitempty"` @@ -148,6 +163,12 @@ type StorageS3CephConfig struct { // Set if rclone should report BucketAlreadyExists errors on bucket creation. UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + // If true use AWS S3 dual-stack endpoint (IPv6 support). UseDualStack *bool `json:"useDualStack,omitempty"` @@ -163,6 +184,9 @@ type StorageS3CephConfig struct { // Whether to use an unsigned payload in PutObject UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + // If true use v2 authentication. 
V2Auth *bool `json:"v2Auth,omitempty"` diff --git a/client/swagger/models/storage_s3_china_mobile_config.go b/client/swagger/models/storage_s3_china_mobile_config.go index 55ece7226..24f6dff8e 100644 --- a/client/swagger/models/storage_s3_china_mobile_config.go +++ b/client/swagger/models/storage_s3_china_mobile_config.go @@ -54,7 +54,7 @@ type StorageS3ChinaMobileConfig struct { // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` - // Endpoint for China Mobile Ecloud Elastic Object Storage (EOS) API. + // Endpoint for S3 API. // Example: eos-wuxi-1.cmecloud.cn Endpoint string `json:"endpoint,omitempty"` @@ -74,7 +74,7 @@ type StorageS3ChinaMobileConfig struct { // Version of ListObjects to use: 1,2 or 0 for auto. ListVersion int64 `json:"listVersion,omitempty"` - // Location constraint - must match endpoint. + // Location constraint - must be set to match the Region. // Example: wuxi1 LocationConstraint string `json:"locationConstraint,omitempty"` @@ -105,6 +105,18 @@ type StorageS3ChinaMobileConfig struct { // Profile to use in the shared credentials file. Profile string `json:"profile,omitempty"` + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + // Set to debug the SDK SdkLogMode *string `json:"sdkLogMode,omitempty"` @@ -120,6 +132,9 @@ type StorageS3ChinaMobileConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + // Set if rclone should include Accept-Encoding as part of the signature. 
+ SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + // If using SSE-C, the server-side encryption algorithm used when storing this object in S3. SseCustomerAlgorithm string `json:"sseCustomerAlgorithm,omitempty"` @@ -132,7 +147,7 @@ type StorageS3ChinaMobileConfig struct { // If using SSE-C you may provide the secret encryption key MD5 checksum (optional). SseCustomerKeyMd5 string `json:"sseCustomerKeyMd5,omitempty"` - // The storage class to use when storing new objects in ChinaMobile. + // The storage class to use when storing new objects in S3. StorageClass string `json:"storageClass,omitempty"` // Concurrency for multipart uploads and copies. @@ -147,6 +162,12 @@ type StorageS3ChinaMobileConfig struct { // Set if rclone should report BucketAlreadyExists errors on bucket creation. UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + // If true use AWS S3 dual-stack endpoint (IPv6 support). UseDualStack *bool `json:"useDualStack,omitempty"` @@ -162,6 +183,9 @@ type StorageS3ChinaMobileConfig struct { // Whether to use an unsigned payload in PutObject UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` diff --git a/client/swagger/models/storage_s3_cloudflare_config.go b/client/swagger/models/storage_s3_cloudflare_config.go index 5ac2d2baa..8da60dd5f 100644 --- a/client/swagger/models/storage_s3_cloudflare_config.go +++ b/client/swagger/models/storage_s3_cloudflare_config.go @@ -20,10 +20,6 @@ type StorageS3CloudflareConfig struct { // AWS Access Key ID. 
AccessKeyID string `json:"accessKeyId,omitempty"` - // Canned ACL used when creating buckets. - // Example: private - BucketACL string `json:"bucketAcl,omitempty"` - // Chunk size to use for uploading. ChunkSize *string `json:"chunkSize,omitempty"` @@ -101,6 +97,18 @@ type StorageS3CloudflareConfig struct { // Example: auto Region string `json:"region,omitempty"` + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + // Set to debug the SDK SdkLogMode *string `json:"sdkLogMode,omitempty"` @@ -113,6 +121,9 @@ type StorageS3CloudflareConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` @@ -125,6 +136,12 @@ type StorageS3CloudflareConfig struct { // Set if rclone should report BucketAlreadyExists errors on bucket creation. UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + // If true use AWS S3 dual-stack endpoint (IPv6 support). 
UseDualStack *bool `json:"useDualStack,omitempty"` @@ -140,6 +157,9 @@ type StorageS3CloudflareConfig struct { // Whether to use an unsigned payload in PutObject UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` diff --git a/client/swagger/models/storage_s3_cubbit_config.go b/client/swagger/models/storage_s3_cubbit_config.go new file mode 100644 index 000000000..837896b06 --- /dev/null +++ b/client/swagger/models/storage_s3_cubbit_config.go @@ -0,0 +1,210 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageS3CubbitConfig storage s3 cubbit config +// +// swagger:model storage.s3CubbitConfig +type StorageS3CubbitConfig struct { + + // AWS Access Key ID. + AccessKeyID string `json:"accessKeyId,omitempty"` + + // Canned ACL used when creating buckets and storing or copying objects. + ACL string `json:"acl,omitempty"` + + // Canned ACL used when creating buckets. + // Example: private + BucketACL string `json:"bucketAcl,omitempty"` + + // Chunk size to use for uploading. + ChunkSize *string `json:"chunkSize,omitempty"` + + // Cutoff for switching to multipart copy. + CopyCutoff *string `json:"copyCutoff,omitempty"` + + // If set this will decompress gzip encoded objects. + Decompress *bool `json:"decompress,omitempty"` + + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + + // Don't store MD5 checksum with object metadata. 
+ DisableChecksum *bool `json:"disableChecksum,omitempty"` + + // Disable usage of http2 for S3 backends. + DisableHttp2 *bool `json:"disableHttp2,omitempty"` + + // Custom endpoint for downloads. + DownloadURL string `json:"downloadUrl,omitempty"` + + // The encoding for the backend. + Encoding *string `json:"encoding,omitempty"` + + // Endpoint for S3 API. + // Example: s3.cubbit.eu + Endpoint string `json:"endpoint,omitempty"` + + // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + // Example: false + EnvAuth *bool `json:"envAuth,omitempty"` + + // If true use path style access if false use virtual hosted style. + ForcePathStyle *bool `json:"forcePathStyle,omitempty"` + + // Size of listing chunk (response list for each ListObject S3 request). + ListChunk *int64 `json:"listChunk,omitempty"` + + // Whether to url encode listings: true/false/unset + ListURLEncode *string `json:"listUrlEncode,omitempty"` + + // Version of ListObjects to use: 1,2 or 0 for auto. + ListVersion int64 `json:"listVersion,omitempty"` + + // Maximum number of parts in a multipart upload. + MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` + + // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` + + // Whether to use mmap buffers in internal memory pool. (no longer used) + MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` + + // Set this if the backend might gzip objects. + MightGzip *string `json:"mightGzip,omitempty"` + + // If set, don't attempt to check the bucket exists or create it. + NoCheckBucket *bool `json:"noCheckBucket,omitempty"` + + // If set, don't HEAD uploaded objects to check integrity. + NoHead *bool `json:"noHead,omitempty"` + + // If set, do not do HEAD before GET when getting objects. 
+ NoHeadObject *bool `json:"noHeadObject,omitempty"` + + // Suppress setting and reading of system metadata + NoSystemMetadata *bool `json:"noSystemMetadata,omitempty"` + + // Profile to use in the shared credentials file. + Profile string `json:"profile,omitempty"` + + // Region to connect to. + // Example: eu-west-1 + Region string `json:"region,omitempty"` + + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + + // AWS Secret Access Key (password). + SecretAccessKey string `json:"secretAccessKey,omitempty"` + + // An AWS session token. + SessionToken string `json:"sessionToken,omitempty"` + + // Path to the shared credentials file. + SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + + // Concurrency for multipart uploads and copies. + UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` + + // Cutoff for switching to chunked upload. + UploadCutoff *string `json:"uploadCutoff,omitempty"` + + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. 
+ UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + + // Whether to use ETag in multipart uploads for verification + UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + + // Set if rclone should use multipart uploads. + UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + + // Whether to use a presigned request or PutObject for single part uploads + UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + + // If true use v2 authentication. + V2Auth *bool `json:"v2Auth,omitempty"` + + // Show file versions as they were at the specified time. + VersionAt *string `json:"versionAt,omitempty"` + + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + + // Include old versions in directory listings. 
+ Versions *bool `json:"versions,omitempty"` +} + +// Validate validates this storage s3 cubbit config +func (m *StorageS3CubbitConfig) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this storage s3 cubbit config based on context it is used +func (m *StorageS3CubbitConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *StorageS3CubbitConfig) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageS3CubbitConfig) UnmarshalBinary(b []byte) error { + var res StorageS3CubbitConfig + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_s3_digital_ocean_config.go b/client/swagger/models/storage_s3_digital_ocean_config.go index f73c444c6..425cc824f 100644 --- a/client/swagger/models/storage_s3_digital_ocean_config.go +++ b/client/swagger/models/storage_s3_digital_ocean_config.go @@ -107,6 +107,18 @@ type StorageS3DigitalOceanConfig struct { // Region to connect to. Region string `json:"region,omitempty"` + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + // Set to debug the SDK SdkLogMode *string `json:"sdkLogMode,omitempty"` @@ -119,6 +131,9 @@ type StorageS3DigitalOceanConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + // Set if rclone should include Accept-Encoding as part of the signature. 
+ SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` @@ -131,6 +146,12 @@ type StorageS3DigitalOceanConfig struct { // Set if rclone should report BucketAlreadyExists errors on bucket creation. UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + // If true use AWS S3 dual-stack endpoint (IPv6 support). UseDualStack *bool `json:"useDualStack,omitempty"` @@ -146,6 +167,9 @@ type StorageS3DigitalOceanConfig struct { // Whether to use an unsigned payload in PutObject UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` diff --git a/client/swagger/models/storage_s3_dreamhost_config.go b/client/swagger/models/storage_s3_dreamhost_config.go index 6e7d903a2..9aadb2cf4 100644 --- a/client/swagger/models/storage_s3_dreamhost_config.go +++ b/client/swagger/models/storage_s3_dreamhost_config.go @@ -107,6 +107,18 @@ type StorageS3DreamhostConfig struct { // Region to connect to. Region string `json:"region,omitempty"` + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. 
+ RoleSessionName string `json:"roleSessionName,omitempty"` + // Set to debug the SDK SdkLogMode *string `json:"sdkLogMode,omitempty"` @@ -119,6 +131,9 @@ type StorageS3DreamhostConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` @@ -131,6 +146,12 @@ type StorageS3DreamhostConfig struct { // Set if rclone should report BucketAlreadyExists errors on bucket creation. UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + // If true use AWS S3 dual-stack endpoint (IPv6 support). UseDualStack *bool `json:"useDualStack,omitempty"` @@ -146,6 +167,9 @@ type StorageS3DreamhostConfig struct { // Whether to use an unsigned payload in PutObject UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` diff --git a/client/swagger/models/storage_s3_exaba_config.go b/client/swagger/models/storage_s3_exaba_config.go new file mode 100644 index 000000000..dcdce65ff --- /dev/null +++ b/client/swagger/models/storage_s3_exaba_config.go @@ -0,0 +1,211 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageS3ExabaConfig storage s3 exaba config +// +// swagger:model storage.s3ExabaConfig +type StorageS3ExabaConfig struct { + + // AWS Access Key ID. + AccessKeyID string `json:"accessKeyId,omitempty"` + + // Canned ACL used when creating buckets and storing or copying objects. + ACL string `json:"acl,omitempty"` + + // Canned ACL used when creating buckets. + // Example: private + BucketACL string `json:"bucketAcl,omitempty"` + + // Chunk size to use for uploading. + ChunkSize *string `json:"chunkSize,omitempty"` + + // Cutoff for switching to multipart copy. + CopyCutoff *string `json:"copyCutoff,omitempty"` + + // If set this will decompress gzip encoded objects. + Decompress *bool `json:"decompress,omitempty"` + + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + + // Don't store MD5 checksum with object metadata. + DisableChecksum *bool `json:"disableChecksum,omitempty"` + + // Disable usage of http2 for S3 backends. + DisableHttp2 *bool `json:"disableHttp2,omitempty"` + + // Custom endpoint for downloads. + DownloadURL string `json:"downloadUrl,omitempty"` + + // The encoding for the backend. + Encoding *string `json:"encoding,omitempty"` + + // Endpoint for S3 API. + Endpoint string `json:"endpoint,omitempty"` + + // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + // Example: false + EnvAuth *bool `json:"envAuth,omitempty"` + + // If true use path style access if false use virtual hosted style. + ForcePathStyle *bool `json:"forcePathStyle,omitempty"` + + // Size of listing chunk (response list for each ListObject S3 request). 
+ ListChunk *int64 `json:"listChunk,omitempty"` + + // Whether to url encode listings: true/false/unset + ListURLEncode *string `json:"listUrlEncode,omitempty"` + + // Version of ListObjects to use: 1,2 or 0 for auto. + ListVersion int64 `json:"listVersion,omitempty"` + + // Location constraint - must be set to match the Region. + LocationConstraint string `json:"locationConstraint,omitempty"` + + // Maximum number of parts in a multipart upload. + MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` + + // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` + + // Whether to use mmap buffers in internal memory pool. (no longer used) + MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` + + // Set this if the backend might gzip objects. + MightGzip *string `json:"mightGzip,omitempty"` + + // If set, don't attempt to check the bucket exists or create it. + NoCheckBucket *bool `json:"noCheckBucket,omitempty"` + + // If set, don't HEAD uploaded objects to check integrity. + NoHead *bool `json:"noHead,omitempty"` + + // If set, do not do HEAD before GET when getting objects. + NoHeadObject *bool `json:"noHeadObject,omitempty"` + + // Suppress setting and reading of system metadata + NoSystemMetadata *bool `json:"noSystemMetadata,omitempty"` + + // Profile to use in the shared credentials file. + Profile string `json:"profile,omitempty"` + + // Region to connect to. + Region string `json:"region,omitempty"` + + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. 
+ RoleSessionName string `json:"roleSessionName,omitempty"` + + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + + // AWS Secret Access Key (password). + SecretAccessKey string `json:"secretAccessKey,omitempty"` + + // An AWS session token. + SessionToken string `json:"sessionToken,omitempty"` + + // Path to the shared credentials file. + SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + + // Concurrency for multipart uploads and copies. + UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` + + // Cutoff for switching to chunked upload. + UploadCutoff *string `json:"uploadCutoff,omitempty"` + + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + + // Whether to use ETag in multipart uploads for verification + UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + + // Set if rclone should use multipart uploads. 
+ UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + + // Whether to use a presigned request or PutObject for single part uploads + UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + + // If true use v2 authentication. + V2Auth *bool `json:"v2Auth,omitempty"` + + // Show file versions as they were at the specified time. + VersionAt *string `json:"versionAt,omitempty"` + + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + + // Include old versions in directory listings. + Versions *bool `json:"versions,omitempty"` +} + +// Validate validates this storage s3 exaba config +func (m *StorageS3ExabaConfig) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this storage s3 exaba config based on context it is used +func (m *StorageS3ExabaConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *StorageS3ExabaConfig) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageS3ExabaConfig) UnmarshalBinary(b []byte) error { + var res StorageS3ExabaConfig + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_s3_file_lu_config.go b/client/swagger/models/storage_s3_file_lu_config.go new file mode 100644 index 000000000..797657546 --- /dev/null +++ b/client/swagger/models/storage_s3_file_lu_config.go @@ -0,0 +1,210 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageS3FileLuConfig storage s3 file lu config +// +// swagger:model storage.s3FileLuConfig +type StorageS3FileLuConfig struct { + + // AWS Access Key ID. + AccessKeyID string `json:"accessKeyId,omitempty"` + + // Canned ACL used when creating buckets and storing or copying objects. + ACL string `json:"acl,omitempty"` + + // Canned ACL used when creating buckets. + // Example: private + BucketACL string `json:"bucketAcl,omitempty"` + + // Chunk size to use for uploading. + ChunkSize *string `json:"chunkSize,omitempty"` + + // Cutoff for switching to multipart copy. + CopyCutoff *string `json:"copyCutoff,omitempty"` + + // If set this will decompress gzip encoded objects. + Decompress *bool `json:"decompress,omitempty"` + + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + + // Don't store MD5 checksum with object metadata. + DisableChecksum *bool `json:"disableChecksum,omitempty"` + + // Disable usage of http2 for S3 backends. + DisableHttp2 *bool `json:"disableHttp2,omitempty"` + + // Custom endpoint for downloads. + DownloadURL string `json:"downloadUrl,omitempty"` + + // The encoding for the backend. + Encoding *string `json:"encoding,omitempty"` + + // Endpoint for S3 API. + // Example: s5lu.com + Endpoint string `json:"endpoint,omitempty"` + + // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + // Example: false + EnvAuth *bool `json:"envAuth,omitempty"` + + // If true use path style access if false use virtual hosted style. + ForcePathStyle *bool `json:"forcePathStyle,omitempty"` + + // Size of listing chunk (response list for each ListObject S3 request). 
+ ListChunk *int64 `json:"listChunk,omitempty"` + + // Whether to url encode listings: true/false/unset + ListURLEncode *string `json:"listUrlEncode,omitempty"` + + // Version of ListObjects to use: 1,2 or 0 for auto. + ListVersion int64 `json:"listVersion,omitempty"` + + // Maximum number of parts in a multipart upload. + MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` + + // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` + + // Whether to use mmap buffers in internal memory pool. (no longer used) + MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` + + // Set this if the backend might gzip objects. + MightGzip *string `json:"mightGzip,omitempty"` + + // If set, don't attempt to check the bucket exists or create it. + NoCheckBucket *bool `json:"noCheckBucket,omitempty"` + + // If set, don't HEAD uploaded objects to check integrity. + NoHead *bool `json:"noHead,omitempty"` + + // If set, do not do HEAD before GET when getting objects. + NoHeadObject *bool `json:"noHeadObject,omitempty"` + + // Suppress setting and reading of system metadata + NoSystemMetadata *bool `json:"noSystemMetadata,omitempty"` + + // Profile to use in the shared credentials file. + Profile string `json:"profile,omitempty"` + + // Region to connect to. + // Example: global + Region string `json:"region,omitempty"` + + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + + // AWS Secret Access Key (password). 
+ SecretAccessKey string `json:"secretAccessKey,omitempty"` + + // An AWS session token. + SessionToken string `json:"sessionToken,omitempty"` + + // Path to the shared credentials file. + SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + + // Concurrency for multipart uploads and copies. + UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` + + // Cutoff for switching to chunked upload. + UploadCutoff *string `json:"uploadCutoff,omitempty"` + + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + + // Whether to use ETag in multipart uploads for verification + UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + + // Set if rclone should use multipart uploads. + UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + + // Whether to use a presigned request or PutObject for single part uploads + UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + + // If true use v2 authentication. 
+ V2Auth *bool `json:"v2Auth,omitempty"` + + // Show file versions as they were at the specified time. + VersionAt *string `json:"versionAt,omitempty"` + + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + + // Include old versions in directory listings. + Versions *bool `json:"versions,omitempty"` +} + +// Validate validates this storage s3 file lu config +func (m *StorageS3FileLuConfig) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this storage s3 file lu config based on context it is used +func (m *StorageS3FileLuConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *StorageS3FileLuConfig) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageS3FileLuConfig) UnmarshalBinary(b []byte) error { + var res StorageS3FileLuConfig + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_s3_flash_blade_config.go b/client/swagger/models/storage_s3_flash_blade_config.go new file mode 100644 index 000000000..26ac97287 --- /dev/null +++ b/client/swagger/models/storage_s3_flash_blade_config.go @@ -0,0 +1,198 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageS3FlashBladeConfig storage s3 flash blade config +// +// swagger:model storage.s3FlashBladeConfig +type StorageS3FlashBladeConfig struct { + + // AWS Access Key ID. + AccessKeyID string `json:"accessKeyId,omitempty"` + + // Chunk size to use for uploading. 
+ ChunkSize *string `json:"chunkSize,omitempty"` + + // Cutoff for switching to multipart copy. + CopyCutoff *string `json:"copyCutoff,omitempty"` + + // If set this will decompress gzip encoded objects. + Decompress *bool `json:"decompress,omitempty"` + + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + + // Don't store MD5 checksum with object metadata. + DisableChecksum *bool `json:"disableChecksum,omitempty"` + + // Disable usage of http2 for S3 backends. + DisableHttp2 *bool `json:"disableHttp2,omitempty"` + + // Custom endpoint for downloads. + DownloadURL string `json:"downloadUrl,omitempty"` + + // The encoding for the backend. + Encoding *string `json:"encoding,omitempty"` + + // Endpoint for S3 API. + Endpoint string `json:"endpoint,omitempty"` + + // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + // Example: false + EnvAuth *bool `json:"envAuth,omitempty"` + + // If true use path style access if false use virtual hosted style. + ForcePathStyle *bool `json:"forcePathStyle,omitempty"` + + // Size of listing chunk (response list for each ListObject S3 request). + ListChunk *int64 `json:"listChunk,omitempty"` + + // Whether to url encode listings: true/false/unset + ListURLEncode *string `json:"listUrlEncode,omitempty"` + + // Version of ListObjects to use: 1,2 or 0 for auto. + ListVersion int64 `json:"listVersion,omitempty"` + + // Maximum number of parts in a multipart upload. + MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` + + // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` + + // Whether to use mmap buffers in internal memory pool. 
(no longer used) + MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` + + // Set this if the backend might gzip objects. + MightGzip *string `json:"mightGzip,omitempty"` + + // If set, don't attempt to check the bucket exists or create it. + NoCheckBucket *bool `json:"noCheckBucket,omitempty"` + + // If set, don't HEAD uploaded objects to check integrity. + NoHead *bool `json:"noHead,omitempty"` + + // If set, do not do HEAD before GET when getting objects. + NoHeadObject *bool `json:"noHeadObject,omitempty"` + + // Suppress setting and reading of system metadata + NoSystemMetadata *bool `json:"noSystemMetadata,omitempty"` + + // Profile to use in the shared credentials file. + Profile string `json:"profile,omitempty"` + + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + + // AWS Secret Access Key (password). + SecretAccessKey string `json:"secretAccessKey,omitempty"` + + // An AWS session token. + SessionToken string `json:"sessionToken,omitempty"` + + // Path to the shared credentials file. + SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + + // Concurrency for multipart uploads and copies. + UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` + + // Cutoff for switching to chunked upload. + UploadCutoff *string `json:"uploadCutoff,omitempty"` + + // Whether to send `Accept-Encoding: gzip` header. 
+ UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + + // Whether to use ETag in multipart uploads for verification + UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + + // Set if rclone should use multipart uploads. + UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + + // Whether to use a presigned request or PutObject for single part uploads + UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + + // If true use v2 authentication. + V2Auth *bool `json:"v2Auth,omitempty"` + + // Show file versions as they were at the specified time. + VersionAt *string `json:"versionAt,omitempty"` + + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + + // Include old versions in directory listings. 
+ Versions *bool `json:"versions,omitempty"` +} + +// Validate validates this storage s3 flash blade config +func (m *StorageS3FlashBladeConfig) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this storage s3 flash blade config based on context it is used +func (m *StorageS3FlashBladeConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *StorageS3FlashBladeConfig) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageS3FlashBladeConfig) UnmarshalBinary(b []byte) error { + var res StorageS3FlashBladeConfig + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_s3_g_c_s_config.go b/client/swagger/models/storage_s3_g_c_s_config.go index 15bdadd19..9619539ba 100644 --- a/client/swagger/models/storage_s3_g_c_s_config.go +++ b/client/swagger/models/storage_s3_g_c_s_config.go @@ -54,7 +54,7 @@ type StorageS3GCSConfig struct { // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` - // Endpoint for Google Cloud Storage. + // Endpoint for S3 API. // Example: https://storage.googleapis.com Endpoint string `json:"endpoint,omitempty"` @@ -107,6 +107,18 @@ type StorageS3GCSConfig struct { // Region to connect to. Region string `json:"region,omitempty"` + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. 
+ RoleSessionName string `json:"roleSessionName,omitempty"` + // Set to debug the SDK SdkLogMode *string `json:"sdkLogMode,omitempty"` @@ -119,6 +131,9 @@ type StorageS3GCSConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` @@ -131,6 +146,12 @@ type StorageS3GCSConfig struct { // Set if rclone should report BucketAlreadyExists errors on bucket creation. UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + // If true use AWS S3 dual-stack endpoint (IPv6 support). UseDualStack *bool `json:"useDualStack,omitempty"` @@ -146,6 +167,9 @@ type StorageS3GCSConfig struct { // Whether to use an unsigned payload in PutObject UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` diff --git a/client/swagger/models/storage_s3_hetzner_config.go b/client/swagger/models/storage_s3_hetzner_config.go new file mode 100644 index 000000000..ed67b82d2 --- /dev/null +++ b/client/swagger/models/storage_s3_hetzner_config.go @@ -0,0 +1,213 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageS3HetznerConfig storage s3 hetzner config +// +// swagger:model storage.s3HetznerConfig +type StorageS3HetznerConfig struct { + + // AWS Access Key ID. + AccessKeyID string `json:"accessKeyId,omitempty"` + + // Canned ACL used when creating buckets and storing or copying objects. + ACL string `json:"acl,omitempty"` + + // Canned ACL used when creating buckets. + // Example: private + BucketACL string `json:"bucketAcl,omitempty"` + + // Chunk size to use for uploading. + ChunkSize *string `json:"chunkSize,omitempty"` + + // Cutoff for switching to multipart copy. + CopyCutoff *string `json:"copyCutoff,omitempty"` + + // If set this will decompress gzip encoded objects. + Decompress *bool `json:"decompress,omitempty"` + + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + + // Don't store MD5 checksum with object metadata. + DisableChecksum *bool `json:"disableChecksum,omitempty"` + + // Disable usage of http2 for S3 backends. + DisableHttp2 *bool `json:"disableHttp2,omitempty"` + + // Custom endpoint for downloads. + DownloadURL string `json:"downloadUrl,omitempty"` + + // The encoding for the backend. + Encoding *string `json:"encoding,omitempty"` + + // Endpoint for S3 API. + // Example: hel1.your-objectstorage.com + Endpoint string `json:"endpoint,omitempty"` + + // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + // Example: false + EnvAuth *bool `json:"envAuth,omitempty"` + + // If true use path style access if false use virtual hosted style. 
+ ForcePathStyle *bool `json:"forcePathStyle,omitempty"` + + // Size of listing chunk (response list for each ListObject S3 request). + ListChunk *int64 `json:"listChunk,omitempty"` + + // Whether to url encode listings: true/false/unset + ListURLEncode *string `json:"listUrlEncode,omitempty"` + + // Version of ListObjects to use: 1,2 or 0 for auto. + ListVersion int64 `json:"listVersion,omitempty"` + + // Location constraint - must be set to match the Region. + LocationConstraint string `json:"locationConstraint,omitempty"` + + // Maximum number of parts in a multipart upload. + MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` + + // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` + + // Whether to use mmap buffers in internal memory pool. (no longer used) + MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` + + // Set this if the backend might gzip objects. + MightGzip *string `json:"mightGzip,omitempty"` + + // If set, don't attempt to check the bucket exists or create it. + NoCheckBucket *bool `json:"noCheckBucket,omitempty"` + + // If set, don't HEAD uploaded objects to check integrity. + NoHead *bool `json:"noHead,omitempty"` + + // If set, do not do HEAD before GET when getting objects. + NoHeadObject *bool `json:"noHeadObject,omitempty"` + + // Suppress setting and reading of system metadata + NoSystemMetadata *bool `json:"noSystemMetadata,omitempty"` + + // Profile to use in the shared credentials file. + Profile string `json:"profile,omitempty"` + + // Region to connect to. + // Example: hel1 + Region string `json:"region,omitempty"` + + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. 
+ RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + + // AWS Secret Access Key (password). + SecretAccessKey string `json:"secretAccessKey,omitempty"` + + // An AWS session token. + SessionToken string `json:"sessionToken,omitempty"` + + // Path to the shared credentials file. + SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + + // Concurrency for multipart uploads and copies. + UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` + + // Cutoff for switching to chunked upload. + UploadCutoff *string `json:"uploadCutoff,omitempty"` + + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + + // Whether to use ETag in multipart uploads for verification + UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + + // Set if rclone should use multipart uploads. 
+ UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + + // Whether to use a presigned request or PutObject for single part uploads + UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + + // If true use v2 authentication. + V2Auth *bool `json:"v2Auth,omitempty"` + + // Show file versions as they were at the specified time. + VersionAt *string `json:"versionAt,omitempty"` + + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + + // Include old versions in directory listings. + Versions *bool `json:"versions,omitempty"` +} + +// Validate validates this storage s3 hetzner config +func (m *StorageS3HetznerConfig) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this storage s3 hetzner config based on context it is used +func (m *StorageS3HetznerConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *StorageS3HetznerConfig) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageS3HetznerConfig) UnmarshalBinary(b []byte) error { + var res StorageS3HetznerConfig + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_s3_huawei_o_b_s_config.go b/client/swagger/models/storage_s3_huawei_o_b_s_config.go index e5b780052..4de0c274a 100644 --- a/client/swagger/models/storage_s3_huawei_o_b_s_config.go +++ b/client/swagger/models/storage_s3_huawei_o_b_s_config.go @@ -54,7 +54,7 @@ type StorageS3HuaweiOBSConfig struct { // The encoding for the backend. 
Encoding *string `json:"encoding,omitempty"` - // Endpoint for OBS API. + // Endpoint for S3 API. // Example: obs.af-south-1.myhuaweicloud.com Endpoint string `json:"endpoint,omitempty"` @@ -101,10 +101,22 @@ type StorageS3HuaweiOBSConfig struct { // Profile to use in the shared credentials file. Profile string `json:"profile,omitempty"` - // Region to connect to. - the location where your bucket will be created and your data stored. Need bo be same with your endpoint. + // Region to connect to. // Example: af-south-1 Region string `json:"region,omitempty"` + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + // Set to debug the SDK SdkLogMode *string `json:"sdkLogMode,omitempty"` @@ -117,6 +129,9 @@ type StorageS3HuaweiOBSConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` @@ -129,6 +144,12 @@ type StorageS3HuaweiOBSConfig struct { // Set if rclone should report BucketAlreadyExists errors on bucket creation. UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + // If true use AWS S3 dual-stack endpoint (IPv6 support). 
UseDualStack *bool `json:"useDualStack,omitempty"` @@ -144,6 +165,9 @@ type StorageS3HuaweiOBSConfig struct { // Whether to use an unsigned payload in PutObject UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` diff --git a/client/swagger/models/storage_s3_i_b_m_c_o_s_config.go b/client/swagger/models/storage_s3_i_b_m_c_o_s_config.go index eedacde08..ea1dd8f4d 100644 --- a/client/swagger/models/storage_s3_i_b_m_c_o_s_config.go +++ b/client/swagger/models/storage_s3_i_b_m_c_o_s_config.go @@ -55,7 +55,7 @@ type StorageS3IBMCOSConfig struct { // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` - // Endpoint for IBM COS S3 API. + // Endpoint for S3 API. // Example: s3.us.cloud-object-storage.appdomain.cloud Endpoint string `json:"endpoint,omitempty"` @@ -66,6 +66,12 @@ type StorageS3IBMCOSConfig struct { // If true use path style access if false use virtual hosted style. ForcePathStyle *bool `json:"forcePathStyle,omitempty"` + // IBM API Key to be used to obtain IAM token + IbmAPIKey string `json:"ibmApiKey,omitempty"` + + // IBM service instance id + IbmResourceInstanceID string `json:"ibmResourceInstanceId,omitempty"` + // Size of listing chunk (response list for each ListObject S3 request). ListChunk *int64 `json:"listChunk,omitempty"` @@ -75,7 +81,7 @@ type StorageS3IBMCOSConfig struct { // Version of ListObjects to use: 1,2 or 0 for auto. ListVersion int64 `json:"listVersion,omitempty"` - // Location constraint - must match endpoint when using IBM Cloud Public. + // Location constraint - must be set to match the Region. // Example: us-standard LocationConstraint string `json:"locationConstraint,omitempty"` @@ -109,6 +115,18 @@ type StorageS3IBMCOSConfig struct { // Region to connect to. 
Region string `json:"region,omitempty"` + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + // Set to debug the SDK SdkLogMode *string `json:"sdkLogMode,omitempty"` @@ -121,6 +139,9 @@ type StorageS3IBMCOSConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` @@ -133,6 +154,12 @@ type StorageS3IBMCOSConfig struct { // Set if rclone should report BucketAlreadyExists errors on bucket creation. UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + // If true use AWS S3 dual-stack endpoint (IPv6 support). UseDualStack *bool `json:"useDualStack,omitempty"` @@ -148,6 +175,9 @@ type StorageS3IBMCOSConfig struct { // Whether to use an unsigned payload in PutObject UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + // If true use v2 authentication. 
V2Auth *bool `json:"v2Auth,omitempty"` diff --git a/client/swagger/models/storage_s3_i_drive_config.go b/client/swagger/models/storage_s3_i_drive_config.go index 94110761d..f848b16f5 100644 --- a/client/swagger/models/storage_s3_i_drive_config.go +++ b/client/swagger/models/storage_s3_i_drive_config.go @@ -97,6 +97,18 @@ type StorageS3IDriveConfig struct { // Profile to use in the shared credentials file. Profile string `json:"profile,omitempty"` + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + // Set to debug the SDK SdkLogMode *string `json:"sdkLogMode,omitempty"` @@ -109,6 +121,9 @@ type StorageS3IDriveConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` @@ -121,6 +136,12 @@ type StorageS3IDriveConfig struct { // Set if rclone should report BucketAlreadyExists errors on bucket creation. UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + // If true use AWS S3 dual-stack endpoint (IPv6 support). 
UseDualStack *bool `json:"useDualStack,omitempty"` @@ -136,6 +157,9 @@ type StorageS3IDriveConfig struct { // Whether to use an unsigned payload in PutObject UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` diff --git a/client/swagger/models/storage_s3_i_o_n_o_s_config.go b/client/swagger/models/storage_s3_i_o_n_o_s_config.go index 441e5c1d5..efddeff6e 100644 --- a/client/swagger/models/storage_s3_i_o_n_o_s_config.go +++ b/client/swagger/models/storage_s3_i_o_n_o_s_config.go @@ -54,7 +54,7 @@ type StorageS3IONOSConfig struct { // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` - // Endpoint for IONOS S3 Object Storage. + // Endpoint for S3 API. // Example: s3-eu-central-1.ionoscloud.com Endpoint string `json:"endpoint,omitempty"` @@ -101,10 +101,22 @@ type StorageS3IONOSConfig struct { // Profile to use in the shared credentials file. Profile string `json:"profile,omitempty"` - // Region where your bucket will be created and your data stored. - // Example: de + // Region to connect to. + // Example: eu-central-2 Region string `json:"region,omitempty"` + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + // Set to debug the SDK SdkLogMode *string `json:"sdkLogMode,omitempty"` @@ -117,6 +129,9 @@ type StorageS3IONOSConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + // Set if rclone should include Accept-Encoding as part of the signature. 
+ SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` @@ -129,6 +144,12 @@ type StorageS3IONOSConfig struct { // Set if rclone should report BucketAlreadyExists errors on bucket creation. UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + // If true use AWS S3 dual-stack endpoint (IPv6 support). UseDualStack *bool `json:"useDualStack,omitempty"` @@ -144,6 +165,9 @@ type StorageS3IONOSConfig struct { // Whether to use an unsigned payload in PutObject UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` diff --git a/client/swagger/models/storage_s3_intercolo_config.go b/client/swagger/models/storage_s3_intercolo_config.go new file mode 100644 index 000000000..3d3c300b4 --- /dev/null +++ b/client/swagger/models/storage_s3_intercolo_config.go @@ -0,0 +1,210 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageS3IntercoloConfig storage s3 intercolo config +// +// swagger:model storage.s3IntercoloConfig +type StorageS3IntercoloConfig struct { + + // AWS Access Key ID. + AccessKeyID string `json:"accessKeyId,omitempty"` + + // Canned ACL used when creating buckets and storing or copying objects. 
+ ACL string `json:"acl,omitempty"` + + // Canned ACL used when creating buckets. + // Example: private + BucketACL string `json:"bucketAcl,omitempty"` + + // Chunk size to use for uploading. + ChunkSize *string `json:"chunkSize,omitempty"` + + // Cutoff for switching to multipart copy. + CopyCutoff *string `json:"copyCutoff,omitempty"` + + // If set this will decompress gzip encoded objects. + Decompress *bool `json:"decompress,omitempty"` + + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + + // Don't store MD5 checksum with object metadata. + DisableChecksum *bool `json:"disableChecksum,omitempty"` + + // Disable usage of http2 for S3 backends. + DisableHttp2 *bool `json:"disableHttp2,omitempty"` + + // Custom endpoint for downloads. + DownloadURL string `json:"downloadUrl,omitempty"` + + // The encoding for the backend. + Encoding *string `json:"encoding,omitempty"` + + // Endpoint for S3 API. + // Example: de-fra.i3storage.com + Endpoint string `json:"endpoint,omitempty"` + + // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + // Example: false + EnvAuth *bool `json:"envAuth,omitempty"` + + // If true use path style access if false use virtual hosted style. + ForcePathStyle *bool `json:"forcePathStyle,omitempty"` + + // Size of listing chunk (response list for each ListObject S3 request). + ListChunk *int64 `json:"listChunk,omitempty"` + + // Whether to url encode listings: true/false/unset + ListURLEncode *string `json:"listUrlEncode,omitempty"` + + // Version of ListObjects to use: 1,2 or 0 for auto. + ListVersion int64 `json:"listVersion,omitempty"` + + // Maximum number of parts in a multipart upload. + MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` + + // How often internal memory buffer pools will be flushed. 
(no longer used) + MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` + + // Whether to use mmap buffers in internal memory pool. (no longer used) + MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` + + // Set this if the backend might gzip objects. + MightGzip *string `json:"mightGzip,omitempty"` + + // If set, don't attempt to check the bucket exists or create it. + NoCheckBucket *bool `json:"noCheckBucket,omitempty"` + + // If set, don't HEAD uploaded objects to check integrity. + NoHead *bool `json:"noHead,omitempty"` + + // If set, do not do HEAD before GET when getting objects. + NoHeadObject *bool `json:"noHeadObject,omitempty"` + + // Suppress setting and reading of system metadata + NoSystemMetadata *bool `json:"noSystemMetadata,omitempty"` + + // Profile to use in the shared credentials file. + Profile string `json:"profile,omitempty"` + + // Region to connect to. + // Example: de-fra + Region string `json:"region,omitempty"` + + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + + // AWS Secret Access Key (password). + SecretAccessKey string `json:"secretAccessKey,omitempty"` + + // An AWS session token. + SessionToken string `json:"sessionToken,omitempty"` + + // Path to the shared credentials file. + SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + + // Concurrency for multipart uploads and copies. 
+ UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` + + // Cutoff for switching to chunked upload. + UploadCutoff *string `json:"uploadCutoff,omitempty"` + + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + + // Whether to use ETag in multipart uploads for verification + UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + + // Set if rclone should use multipart uploads. + UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + + // Whether to use a presigned request or PutObject for single part uploads + UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + + // If true use v2 authentication. + V2Auth *bool `json:"v2Auth,omitempty"` + + // Show file versions as they were at the specified time. + VersionAt *string `json:"versionAt,omitempty"` + + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + + // Include old versions in directory listings. 
+ Versions *bool `json:"versions,omitempty"` +} + +// Validate validates this storage s3 intercolo config +func (m *StorageS3IntercoloConfig) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this storage s3 intercolo config based on context it is used +func (m *StorageS3IntercoloConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *StorageS3IntercoloConfig) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageS3IntercoloConfig) UnmarshalBinary(b []byte) error { + var res StorageS3IntercoloConfig + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_s3_leviia_config.go b/client/swagger/models/storage_s3_leviia_config.go index f53de1a23..f76f6f4d0 100644 --- a/client/swagger/models/storage_s3_leviia_config.go +++ b/client/swagger/models/storage_s3_leviia_config.go @@ -55,6 +55,7 @@ type StorageS3LeviiaConfig struct { Encoding *string `json:"encoding,omitempty"` // Endpoint for S3 API. + // Example: s3.leviia.com Endpoint string `json:"endpoint,omitempty"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). @@ -103,6 +104,18 @@ type StorageS3LeviiaConfig struct { // Region to connect to. Region string `json:"region,omitempty"` + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. 
+ RoleSessionName string `json:"roleSessionName,omitempty"` + // Set to debug the SDK SdkLogMode *string `json:"sdkLogMode,omitempty"` @@ -115,6 +128,9 @@ type StorageS3LeviiaConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` @@ -127,6 +143,12 @@ type StorageS3LeviiaConfig struct { // Set if rclone should report BucketAlreadyExists errors on bucket creation. UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + // If true use AWS S3 dual-stack endpoint (IPv6 support). UseDualStack *bool `json:"useDualStack,omitempty"` @@ -142,6 +164,9 @@ type StorageS3LeviiaConfig struct { // Whether to use an unsigned payload in PutObject UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` diff --git a/client/swagger/models/storage_s3_liara_config.go b/client/swagger/models/storage_s3_liara_config.go index 7331d7fb4..14431ea63 100644 --- a/client/swagger/models/storage_s3_liara_config.go +++ b/client/swagger/models/storage_s3_liara_config.go @@ -54,7 +54,7 @@ type StorageS3LiaraConfig struct { // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` - // Endpoint for Liara Object Storage API. + // Endpoint for S3 API. 
// Example: storage.iran.liara.space Endpoint string `json:"endpoint,omitempty"` @@ -101,6 +101,18 @@ type StorageS3LiaraConfig struct { // Profile to use in the shared credentials file. Profile string `json:"profile,omitempty"` + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + // Set to debug the SDK SdkLogMode *string `json:"sdkLogMode,omitempty"` @@ -113,8 +125,10 @@ type StorageS3LiaraConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` - // The storage class to use when storing new objects in Liara - // Example: STANDARD + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + + // The storage class to use when storing new objects in S3. StorageClass string `json:"storageClass,omitempty"` // Concurrency for multipart uploads and copies. @@ -129,6 +143,12 @@ type StorageS3LiaraConfig struct { // Set if rclone should report BucketAlreadyExists errors on bucket creation. UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + // If true use AWS S3 dual-stack endpoint (IPv6 support). 
UseDualStack *bool `json:"useDualStack,omitempty"` @@ -144,6 +164,9 @@ type StorageS3LiaraConfig struct { // Whether to use an unsigned payload in PutObject UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` diff --git a/client/swagger/models/storage_s3_linode_config.go b/client/swagger/models/storage_s3_linode_config.go index c97c205a2..f8b32e4b6 100644 --- a/client/swagger/models/storage_s3_linode_config.go +++ b/client/swagger/models/storage_s3_linode_config.go @@ -54,8 +54,8 @@ type StorageS3LinodeConfig struct { // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` - // Endpoint for Linode Object Storage API. - // Example: us-southeast-1.linodeobjects.com + // Endpoint for S3 API. + // Example: nl-ams-1.linodeobjects.com Endpoint string `json:"endpoint,omitempty"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). @@ -101,6 +101,18 @@ type StorageS3LinodeConfig struct { // Profile to use in the shared credentials file. Profile string `json:"profile,omitempty"` + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + // Set to debug the SDK SdkLogMode *string `json:"sdkLogMode,omitempty"` @@ -113,6 +125,9 @@ type StorageS3LinodeConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + // Set if rclone should include Accept-Encoding as part of the signature. 
+ SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` @@ -125,6 +140,12 @@ type StorageS3LinodeConfig struct { // Set if rclone should report BucketAlreadyExists errors on bucket creation. UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + // If true use AWS S3 dual-stack endpoint (IPv6 support). UseDualStack *bool `json:"useDualStack,omitempty"` @@ -140,6 +161,9 @@ type StorageS3LinodeConfig struct { // Whether to use an unsigned payload in PutObject UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` diff --git a/client/swagger/models/storage_s3_lyve_cloud_config.go b/client/swagger/models/storage_s3_lyve_cloud_config.go index 99cb7f3e5..a94e63727 100644 --- a/client/swagger/models/storage_s3_lyve_cloud_config.go +++ b/client/swagger/models/storage_s3_lyve_cloud_config.go @@ -55,7 +55,7 @@ type StorageS3LyveCloudConfig struct { Encoding *string `json:"encoding,omitempty"` // Endpoint for S3 API. - // Example: s3.us-east-1.lyvecloud.seagate.com + // Example: s3.us-west-1.{account_name}.lyve.seagate.com Endpoint string `json:"endpoint,omitempty"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). @@ -107,6 +107,18 @@ type StorageS3LyveCloudConfig struct { // Region to connect to. Region string `json:"region,omitempty"` + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. 
+ RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + // Set to debug the SDK SdkLogMode *string `json:"sdkLogMode,omitempty"` @@ -119,6 +131,9 @@ type StorageS3LyveCloudConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` @@ -131,6 +146,12 @@ type StorageS3LyveCloudConfig struct { // Set if rclone should report BucketAlreadyExists errors on bucket creation. UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + // If true use AWS S3 dual-stack endpoint (IPv6 support). UseDualStack *bool `json:"useDualStack,omitempty"` @@ -146,6 +167,9 @@ type StorageS3LyveCloudConfig struct { // Whether to use an unsigned payload in PutObject UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + // If true use v2 authentication. 
V2Auth *bool `json:"v2Auth,omitempty"` diff --git a/client/swagger/models/storage_s3_magalu_config.go b/client/swagger/models/storage_s3_magalu_config.go index 66be4ba50..19b8180d6 100644 --- a/client/swagger/models/storage_s3_magalu_config.go +++ b/client/swagger/models/storage_s3_magalu_config.go @@ -101,6 +101,18 @@ type StorageS3MagaluConfig struct { // Profile to use in the shared credentials file. Profile string `json:"profile,omitempty"` + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + // Set to debug the SDK SdkLogMode *string `json:"sdkLogMode,omitempty"` @@ -113,8 +125,10 @@ type StorageS3MagaluConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` - // The storage class to use when storing new objects in Magalu. - // Example: STANDARD + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + + // The storage class to use when storing new objects in S3. StorageClass string `json:"storageClass,omitempty"` // Concurrency for multipart uploads and copies. @@ -129,6 +143,12 @@ type StorageS3MagaluConfig struct { // Set if rclone should report BucketAlreadyExists errors on bucket creation. UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + // If true use AWS S3 dual-stack endpoint (IPv6 support). 
UseDualStack *bool `json:"useDualStack,omitempty"` @@ -144,6 +164,9 @@ type StorageS3MagaluConfig struct { // Whether to use an unsigned payload in PutObject UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` diff --git a/client/swagger/models/storage_s3_mega_config.go b/client/swagger/models/storage_s3_mega_config.go new file mode 100644 index 000000000..a58261cb1 --- /dev/null +++ b/client/swagger/models/storage_s3_mega_config.go @@ -0,0 +1,203 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageS3MegaConfig storage s3 mega config +// +// swagger:model storage.s3MegaConfig +type StorageS3MegaConfig struct { + + // AWS Access Key ID. + AccessKeyID string `json:"accessKeyId,omitempty"` + + // Canned ACL used when creating buckets. + // Example: private + BucketACL string `json:"bucketAcl,omitempty"` + + // Chunk size to use for uploading. + ChunkSize *string `json:"chunkSize,omitempty"` + + // Cutoff for switching to multipart copy. + CopyCutoff *string `json:"copyCutoff,omitempty"` + + // If set this will decompress gzip encoded objects. + Decompress *bool `json:"decompress,omitempty"` + + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + + // Don't store MD5 checksum with object metadata. + DisableChecksum *bool `json:"disableChecksum,omitempty"` + + // Disable usage of http2 for S3 backends. 
+ DisableHttp2 *bool `json:"disableHttp2,omitempty"` + + // Custom endpoint for downloads. + DownloadURL string `json:"downloadUrl,omitempty"` + + // The encoding for the backend. + Encoding *string `json:"encoding,omitempty"` + + // Endpoint for S3 API. + // Example: s3.eu-central-1.s4.mega.io + Endpoint string `json:"endpoint,omitempty"` + + // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + // Example: false + EnvAuth *bool `json:"envAuth,omitempty"` + + // If true use path style access if false use virtual hosted style. + ForcePathStyle *bool `json:"forcePathStyle,omitempty"` + + // Size of listing chunk (response list for each ListObject S3 request). + ListChunk *int64 `json:"listChunk,omitempty"` + + // Whether to url encode listings: true/false/unset + ListURLEncode *string `json:"listUrlEncode,omitempty"` + + // Version of ListObjects to use: 1,2 or 0 for auto. + ListVersion int64 `json:"listVersion,omitempty"` + + // Maximum number of parts in a multipart upload. + MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` + + // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` + + // Whether to use mmap buffers in internal memory pool. (no longer used) + MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` + + // Set this if the backend might gzip objects. + MightGzip *string `json:"mightGzip,omitempty"` + + // If set, don't attempt to check the bucket exists or create it. + NoCheckBucket *bool `json:"noCheckBucket,omitempty"` + + // If set, don't HEAD uploaded objects to check integrity. + NoHead *bool `json:"noHead,omitempty"` + + // If set, do not do HEAD before GET when getting objects. + NoHeadObject *bool `json:"noHeadObject,omitempty"` + + // Suppress setting and reading of system metadata + NoSystemMetadata *bool `json:"noSystemMetadata,omitempty"` + + // Profile to use in the shared credentials file. 
+ Profile string `json:"profile,omitempty"` + + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + + // AWS Secret Access Key (password). + SecretAccessKey string `json:"secretAccessKey,omitempty"` + + // An AWS session token. + SessionToken string `json:"sessionToken,omitempty"` + + // Path to the shared credentials file. + SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + + // Concurrency for multipart uploads and copies. + UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` + + // Cutoff for switching to chunked upload. + UploadCutoff *string `json:"uploadCutoff,omitempty"` + + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + + // Whether to use ETag in multipart uploads for verification + UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + + // Set if rclone should use multipart uploads. 
+ UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + + // Whether to use a presigned request or PutObject for single part uploads + UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + + // If true use v2 authentication. + V2Auth *bool `json:"v2Auth,omitempty"` + + // Show file versions as they were at the specified time. + VersionAt *string `json:"versionAt,omitempty"` + + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + + // Include old versions in directory listings. + Versions *bool `json:"versions,omitempty"` +} + +// Validate validates this storage s3 mega config +func (m *StorageS3MegaConfig) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this storage s3 mega config based on context it is used +func (m *StorageS3MegaConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *StorageS3MegaConfig) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageS3MegaConfig) UnmarshalBinary(b []byte) error { + var res StorageS3MegaConfig + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_s3_minio_config.go b/client/swagger/models/storage_s3_minio_config.go index e4d035d76..9f5d81b9f 100644 --- a/client/swagger/models/storage_s3_minio_config.go +++ b/client/swagger/models/storage_s3_minio_config.go @@ -106,6 +106,18 @@ type StorageS3MinioConfig struct { // Region to connect to. 
Region string `json:"region,omitempty"` + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + // Set to debug the SDK SdkLogMode *string `json:"sdkLogMode,omitempty"` @@ -121,6 +133,9 @@ type StorageS3MinioConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + // If using SSE-C, the server-side encryption algorithm used when storing this object in S3. SseCustomerAlgorithm string `json:"sseCustomerAlgorithm,omitempty"` @@ -148,6 +163,12 @@ type StorageS3MinioConfig struct { // Set if rclone should report BucketAlreadyExists errors on bucket creation. UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + // If true use AWS S3 dual-stack endpoint (IPv6 support). UseDualStack *bool `json:"useDualStack,omitempty"` @@ -163,6 +184,9 @@ type StorageS3MinioConfig struct { // Whether to use an unsigned payload in PutObject UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + // If true use v2 authentication. 
V2Auth *bool `json:"v2Auth,omitempty"` diff --git a/client/swagger/models/storage_s3_netease_config.go b/client/swagger/models/storage_s3_netease_config.go index 88e60e9c1..ae3f52455 100644 --- a/client/swagger/models/storage_s3_netease_config.go +++ b/client/swagger/models/storage_s3_netease_config.go @@ -106,6 +106,18 @@ type StorageS3NeteaseConfig struct { // Region to connect to. Region string `json:"region,omitempty"` + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + // Set to debug the SDK SdkLogMode *string `json:"sdkLogMode,omitempty"` @@ -118,6 +130,9 @@ type StorageS3NeteaseConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` @@ -130,6 +145,12 @@ type StorageS3NeteaseConfig struct { // Set if rclone should report BucketAlreadyExists errors on bucket creation. UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + // If true use AWS S3 dual-stack endpoint (IPv6 support). 
UseDualStack *bool `json:"useDualStack,omitempty"` @@ -145,6 +166,9 @@ type StorageS3NeteaseConfig struct { // Whether to use an unsigned payload in PutObject UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` diff --git a/client/swagger/models/storage_s3_o_v_hcloud_config.go b/client/swagger/models/storage_s3_o_v_hcloud_config.go new file mode 100644 index 000000000..95407c32a --- /dev/null +++ b/client/swagger/models/storage_s3_o_v_hcloud_config.go @@ -0,0 +1,210 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageS3OVHcloudConfig storage s3 o v hcloud config +// +// swagger:model storage.s3OVHcloudConfig +type StorageS3OVHcloudConfig struct { + + // AWS Access Key ID. + AccessKeyID string `json:"accessKeyId,omitempty"` + + // Canned ACL used when creating buckets and storing or copying objects. + ACL string `json:"acl,omitempty"` + + // Canned ACL used when creating buckets. + // Example: private + BucketACL string `json:"bucketAcl,omitempty"` + + // Chunk size to use for uploading. + ChunkSize *string `json:"chunkSize,omitempty"` + + // Cutoff for switching to multipart copy. + CopyCutoff *string `json:"copyCutoff,omitempty"` + + // If set this will decompress gzip encoded objects. + Decompress *bool `json:"decompress,omitempty"` + + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + + // Don't store MD5 checksum with object metadata. 
+ DisableChecksum *bool `json:"disableChecksum,omitempty"` + + // Disable usage of http2 for S3 backends. + DisableHttp2 *bool `json:"disableHttp2,omitempty"` + + // Custom endpoint for downloads. + DownloadURL string `json:"downloadUrl,omitempty"` + + // The encoding for the backend. + Encoding *string `json:"encoding,omitempty"` + + // Endpoint for S3 API. + // Example: s3.gra.io.cloud.ovh.net + Endpoint string `json:"endpoint,omitempty"` + + // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + // Example: false + EnvAuth *bool `json:"envAuth,omitempty"` + + // If true use path style access if false use virtual hosted style. + ForcePathStyle *bool `json:"forcePathStyle,omitempty"` + + // Size of listing chunk (response list for each ListObject S3 request). + ListChunk *int64 `json:"listChunk,omitempty"` + + // Whether to url encode listings: true/false/unset + ListURLEncode *string `json:"listUrlEncode,omitempty"` + + // Version of ListObjects to use: 1,2 or 0 for auto. + ListVersion int64 `json:"listVersion,omitempty"` + + // Maximum number of parts in a multipart upload. + MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` + + // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` + + // Whether to use mmap buffers in internal memory pool. (no longer used) + MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` + + // Set this if the backend might gzip objects. + MightGzip *string `json:"mightGzip,omitempty"` + + // If set, don't attempt to check the bucket exists or create it. + NoCheckBucket *bool `json:"noCheckBucket,omitempty"` + + // If set, don't HEAD uploaded objects to check integrity. + NoHead *bool `json:"noHead,omitempty"` + + // If set, do not do HEAD before GET when getting objects. 
+ NoHeadObject *bool `json:"noHeadObject,omitempty"` + + // Suppress setting and reading of system metadata + NoSystemMetadata *bool `json:"noSystemMetadata,omitempty"` + + // Profile to use in the shared credentials file. + Profile string `json:"profile,omitempty"` + + // Region to connect to. + // Example: gra + Region string `json:"region,omitempty"` + + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + + // AWS Secret Access Key (password). + SecretAccessKey string `json:"secretAccessKey,omitempty"` + + // An AWS session token. + SessionToken string `json:"sessionToken,omitempty"` + + // Path to the shared credentials file. + SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + + // Concurrency for multipart uploads and copies. + UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` + + // Cutoff for switching to chunked upload. + UploadCutoff *string `json:"uploadCutoff,omitempty"` + + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. 
+ UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + + // Whether to use ETag in multipart uploads for verification + UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + + // Set if rclone should use multipart uploads. + UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + + // Whether to use a presigned request or PutObject for single part uploads + UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + + // If true use v2 authentication. + V2Auth *bool `json:"v2Auth,omitempty"` + + // Show file versions as they were at the specified time. + VersionAt *string `json:"versionAt,omitempty"` + + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + + // Include old versions in directory listings. 
+ Versions *bool `json:"versions,omitempty"` +} + +// Validate validates this storage s3 o v hcloud config +func (m *StorageS3OVHcloudConfig) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this storage s3 o v hcloud config based on context it is used +func (m *StorageS3OVHcloudConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *StorageS3OVHcloudConfig) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageS3OVHcloudConfig) UnmarshalBinary(b []byte) error { + var res StorageS3OVHcloudConfig + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_s3_other_config.go b/client/swagger/models/storage_s3_other_config.go index 82e01886f..593d5fccf 100644 --- a/client/swagger/models/storage_s3_other_config.go +++ b/client/swagger/models/storage_s3_other_config.go @@ -106,6 +106,18 @@ type StorageS3OtherConfig struct { // Region to connect to. Region string `json:"region,omitempty"` + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + // Set to debug the SDK SdkLogMode *string `json:"sdkLogMode,omitempty"` @@ -118,6 +130,9 @@ type StorageS3OtherConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + // Set if rclone should include Accept-Encoding as part of the signature. 
+ SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` @@ -130,6 +145,12 @@ type StorageS3OtherConfig struct { // Set if rclone should report BucketAlreadyExists errors on bucket creation. UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + // If true use AWS S3 dual-stack endpoint (IPv6 support). UseDualStack *bool `json:"useDualStack,omitempty"` @@ -145,6 +166,9 @@ type StorageS3OtherConfig struct { // Whether to use an unsigned payload in PutObject UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` diff --git a/client/swagger/models/storage_s3_outscale_config.go b/client/swagger/models/storage_s3_outscale_config.go new file mode 100644 index 000000000..adaef3b83 --- /dev/null +++ b/client/swagger/models/storage_s3_outscale_config.go @@ -0,0 +1,210 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageS3OutscaleConfig storage s3 outscale config +// +// swagger:model storage.s3OutscaleConfig +type StorageS3OutscaleConfig struct { + + // AWS Access Key ID. + AccessKeyID string `json:"accessKeyId,omitempty"` + + // Canned ACL used when creating buckets and storing or copying objects. 
+ ACL string `json:"acl,omitempty"` + + // Canned ACL used when creating buckets. + // Example: private + BucketACL string `json:"bucketAcl,omitempty"` + + // Chunk size to use for uploading. + ChunkSize *string `json:"chunkSize,omitempty"` + + // Cutoff for switching to multipart copy. + CopyCutoff *string `json:"copyCutoff,omitempty"` + + // If set this will decompress gzip encoded objects. + Decompress *bool `json:"decompress,omitempty"` + + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + + // Don't store MD5 checksum with object metadata. + DisableChecksum *bool `json:"disableChecksum,omitempty"` + + // Disable usage of http2 for S3 backends. + DisableHttp2 *bool `json:"disableHttp2,omitempty"` + + // Custom endpoint for downloads. + DownloadURL string `json:"downloadUrl,omitempty"` + + // The encoding for the backend. + Encoding *string `json:"encoding,omitempty"` + + // Endpoint for S3 API. + // Example: oos.eu-west-2.outscale.com + Endpoint string `json:"endpoint,omitempty"` + + // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + // Example: false + EnvAuth *bool `json:"envAuth,omitempty"` + + // If true use path style access if false use virtual hosted style. + ForcePathStyle *bool `json:"forcePathStyle,omitempty"` + + // Size of listing chunk (response list for each ListObject S3 request). + ListChunk *int64 `json:"listChunk,omitempty"` + + // Whether to url encode listings: true/false/unset + ListURLEncode *string `json:"listUrlEncode,omitempty"` + + // Version of ListObjects to use: 1,2 or 0 for auto. + ListVersion int64 `json:"listVersion,omitempty"` + + // Maximum number of parts in a multipart upload. + MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` + + // How often internal memory buffer pools will be flushed. 
(no longer used) + MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` + + // Whether to use mmap buffers in internal memory pool. (no longer used) + MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` + + // Set this if the backend might gzip objects. + MightGzip *string `json:"mightGzip,omitempty"` + + // If set, don't attempt to check the bucket exists or create it. + NoCheckBucket *bool `json:"noCheckBucket,omitempty"` + + // If set, don't HEAD uploaded objects to check integrity. + NoHead *bool `json:"noHead,omitempty"` + + // If set, do not do HEAD before GET when getting objects. + NoHeadObject *bool `json:"noHeadObject,omitempty"` + + // Suppress setting and reading of system metadata + NoSystemMetadata *bool `json:"noSystemMetadata,omitempty"` + + // Profile to use in the shared credentials file. + Profile string `json:"profile,omitempty"` + + // Region to connect to. + // Example: eu-west-2 + Region string `json:"region,omitempty"` + + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + + // AWS Secret Access Key (password). + SecretAccessKey string `json:"secretAccessKey,omitempty"` + + // An AWS session token. + SessionToken string `json:"sessionToken,omitempty"` + + // Path to the shared credentials file. + SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + + // Concurrency for multipart uploads and copies. 
+ UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` + + // Cutoff for switching to chunked upload. + UploadCutoff *string `json:"uploadCutoff,omitempty"` + + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + + // Whether to use ETag in multipart uploads for verification + UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + + // Set if rclone should use multipart uploads. + UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + + // Whether to use a presigned request or PutObject for single part uploads + UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + + // If true use v2 authentication. + V2Auth *bool `json:"v2Auth,omitempty"` + + // Show file versions as they were at the specified time. + VersionAt *string `json:"versionAt,omitempty"` + + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + + // Include old versions in directory listings. 
+ Versions *bool `json:"versions,omitempty"` +} + +// Validate validates this storage s3 outscale config +func (m *StorageS3OutscaleConfig) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this storage s3 outscale config based on context it is used +func (m *StorageS3OutscaleConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *StorageS3OutscaleConfig) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageS3OutscaleConfig) UnmarshalBinary(b []byte) error { + var res StorageS3OutscaleConfig + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_s3_petabox_config.go b/client/swagger/models/storage_s3_petabox_config.go index c510ed7e1..c96e2a21b 100644 --- a/client/swagger/models/storage_s3_petabox_config.go +++ b/client/swagger/models/storage_s3_petabox_config.go @@ -54,7 +54,7 @@ type StorageS3PetaboxConfig struct { // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` - // Endpoint for Petabox S3 Object Storage. + // Endpoint for S3 API. // Example: s3.petabox.io Endpoint string `json:"endpoint,omitempty"` @@ -101,10 +101,22 @@ type StorageS3PetaboxConfig struct { // Profile to use in the shared credentials file. Profile string `json:"profile,omitempty"` - // Region where your bucket will be created and your data stored. - // Example: us-east-1 + // Region to connect to. + // Example: eu-central-1 Region string `json:"region,omitempty"` + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. 
+ RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + // Set to debug the SDK SdkLogMode *string `json:"sdkLogMode,omitempty"` @@ -117,6 +129,9 @@ type StorageS3PetaboxConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` @@ -129,6 +144,12 @@ type StorageS3PetaboxConfig struct { // Set if rclone should report BucketAlreadyExists errors on bucket creation. UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + // If true use AWS S3 dual-stack endpoint (IPv6 support). UseDualStack *bool `json:"useDualStack,omitempty"` @@ -144,6 +165,9 @@ type StorageS3PetaboxConfig struct { // Whether to use an unsigned payload in PutObject UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` diff --git a/client/swagger/models/storage_s3_qiniu_config.go b/client/swagger/models/storage_s3_qiniu_config.go index d9ddd0fed..e672c8ac0 100644 --- a/client/swagger/models/storage_s3_qiniu_config.go +++ b/client/swagger/models/storage_s3_qiniu_config.go @@ -54,7 +54,7 @@ type StorageS3QiniuConfig struct { // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` - // Endpoint for Qiniu Object Storage. 
+ // Endpoint for S3 API. // Example: s3-cn-east-1.qiniucs.com Endpoint string `json:"endpoint,omitempty"` @@ -109,6 +109,18 @@ type StorageS3QiniuConfig struct { // Example: cn-east-1 Region string `json:"region,omitempty"` + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + // Set to debug the SDK SdkLogMode *string `json:"sdkLogMode,omitempty"` @@ -121,8 +133,11 @@ type StorageS3QiniuConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` - // The storage class to use when storing new objects in Qiniu. - // Example: STANDARD + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + + // The storage class to use when storing new objects in S3. + // Example: LINE StorageClass string `json:"storageClass,omitempty"` // Concurrency for multipart uploads and copies. @@ -137,6 +152,12 @@ type StorageS3QiniuConfig struct { // Set if rclone should report BucketAlreadyExists errors on bucket creation. UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + // If true use AWS S3 dual-stack endpoint (IPv6 support). 
UseDualStack *bool `json:"useDualStack,omitempty"` @@ -152,6 +173,9 @@ type StorageS3QiniuConfig struct { // Whether to use an unsigned payload in PutObject UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` diff --git a/client/swagger/models/storage_s3_rabata_config.go b/client/swagger/models/storage_s3_rabata_config.go new file mode 100644 index 000000000..ac41ac0b3 --- /dev/null +++ b/client/swagger/models/storage_s3_rabata_config.go @@ -0,0 +1,207 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageS3RabataConfig storage s3 rabata config +// +// swagger:model storage.s3RabataConfig +type StorageS3RabataConfig struct { + + // AWS Access Key ID. + AccessKeyID string `json:"accessKeyId,omitempty"` + + // Chunk size to use for uploading. + ChunkSize *string `json:"chunkSize,omitempty"` + + // Cutoff for switching to multipart copy. + CopyCutoff *string `json:"copyCutoff,omitempty"` + + // If set this will decompress gzip encoded objects. + Decompress *bool `json:"decompress,omitempty"` + + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + + // Don't store MD5 checksum with object metadata. + DisableChecksum *bool `json:"disableChecksum,omitempty"` + + // Disable usage of http2 for S3 backends. + DisableHttp2 *bool `json:"disableHttp2,omitempty"` + + // Custom endpoint for downloads. 
+ DownloadURL string `json:"downloadUrl,omitempty"` + + // The encoding for the backend. + Encoding *string `json:"encoding,omitempty"` + + // Endpoint for S3 API. + // Example: s3.us-east-1.rabata.io + Endpoint string `json:"endpoint,omitempty"` + + // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + // Example: false + EnvAuth *bool `json:"envAuth,omitempty"` + + // If true use path style access if false use virtual hosted style. + ForcePathStyle *bool `json:"forcePathStyle,omitempty"` + + // Size of listing chunk (response list for each ListObject S3 request). + ListChunk *int64 `json:"listChunk,omitempty"` + + // Whether to url encode listings: true/false/unset + ListURLEncode *string `json:"listUrlEncode,omitempty"` + + // Version of ListObjects to use: 1,2 or 0 for auto. + ListVersion int64 `json:"listVersion,omitempty"` + + // Location constraint - must be set to match the Region. + // Example: us-east-1 + LocationConstraint string `json:"locationConstraint,omitempty"` + + // Maximum number of parts in a multipart upload. + MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` + + // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` + + // Whether to use mmap buffers in internal memory pool. (no longer used) + MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` + + // Set this if the backend might gzip objects. + MightGzip *string `json:"mightGzip,omitempty"` + + // If set, don't attempt to check the bucket exists or create it. + NoCheckBucket *bool `json:"noCheckBucket,omitempty"` + + // If set, don't HEAD uploaded objects to check integrity. + NoHead *bool `json:"noHead,omitempty"` + + // If set, do not do HEAD before GET when getting objects. 
+ NoHeadObject *bool `json:"noHeadObject,omitempty"` + + // Suppress setting and reading of system metadata + NoSystemMetadata *bool `json:"noSystemMetadata,omitempty"` + + // Profile to use in the shared credentials file. + Profile string `json:"profile,omitempty"` + + // Region to connect to. + // Example: eu-west-1 + Region string `json:"region,omitempty"` + + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + + // AWS Secret Access Key (password). + SecretAccessKey string `json:"secretAccessKey,omitempty"` + + // An AWS session token. + SessionToken string `json:"sessionToken,omitempty"` + + // Path to the shared credentials file. + SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + + // Concurrency for multipart uploads and copies. + UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` + + // Cutoff for switching to chunked upload. + UploadCutoff *string `json:"uploadCutoff,omitempty"` + + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. 
+ UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + + // Whether to use ETag in multipart uploads for verification + UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + + // Set if rclone should use multipart uploads. + UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + + // Whether to use a presigned request or PutObject for single part uploads + UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + + // If true use v2 authentication. + V2Auth *bool `json:"v2Auth,omitempty"` + + // Show file versions as they were at the specified time. + VersionAt *string `json:"versionAt,omitempty"` + + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + + // Include old versions in directory listings. 
+ Versions *bool `json:"versions,omitempty"` +} + +// Validate validates this storage s3 rabata config +func (m *StorageS3RabataConfig) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this storage s3 rabata config based on context it is used +func (m *StorageS3RabataConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *StorageS3RabataConfig) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageS3RabataConfig) UnmarshalBinary(b []byte) error { + var res StorageS3RabataConfig + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_s3_rack_corp_config.go b/client/swagger/models/storage_s3_rack_corp_config.go index 90e63acfb..79ae99d45 100644 --- a/client/swagger/models/storage_s3_rack_corp_config.go +++ b/client/swagger/models/storage_s3_rack_corp_config.go @@ -54,7 +54,7 @@ type StorageS3RackCorpConfig struct { // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` - // Endpoint for RackCorp Object Storage. + // Endpoint for S3 API. // Example: s3.rackcorp.com Endpoint string `json:"endpoint,omitempty"` @@ -74,7 +74,7 @@ type StorageS3RackCorpConfig struct { // Version of ListObjects to use: 1,2 or 0 for auto. ListVersion int64 `json:"listVersion,omitempty"` - // Location constraint - the location where your bucket will be located and your data stored. + // Location constraint - must be set to match the Region. // Example: global LocationConstraint string `json:"locationConstraint,omitempty"` @@ -105,10 +105,22 @@ type StorageS3RackCorpConfig struct { // Profile to use in the shared credentials file. 
Profile string `json:"profile,omitempty"` - // region - the location where your bucket will be created and your data stored. + // Region to connect to. // Example: global Region string `json:"region,omitempty"` + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + // Set to debug the SDK SdkLogMode *string `json:"sdkLogMode,omitempty"` @@ -121,6 +133,9 @@ type StorageS3RackCorpConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` @@ -133,6 +148,12 @@ type StorageS3RackCorpConfig struct { // Set if rclone should report BucketAlreadyExists errors on bucket creation. UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + // If true use AWS S3 dual-stack endpoint (IPv6 support). UseDualStack *bool `json:"useDualStack,omitempty"` @@ -148,6 +169,9 @@ type StorageS3RackCorpConfig struct { // Whether to use an unsigned payload in PutObject UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + // If true use v2 authentication. 
V2Auth *bool `json:"v2Auth,omitempty"` diff --git a/client/swagger/models/storage_s3_rclone_config.go b/client/swagger/models/storage_s3_rclone_config.go index f7cbd1ee9..ac6c59056 100644 --- a/client/swagger/models/storage_s3_rclone_config.go +++ b/client/swagger/models/storage_s3_rclone_config.go @@ -20,13 +20,6 @@ type StorageS3RcloneConfig struct { // AWS Access Key ID. AccessKeyID string `json:"accessKeyId,omitempty"` - // Canned ACL used when creating buckets and storing or copying objects. - ACL string `json:"acl,omitempty"` - - // Canned ACL used when creating buckets. - // Example: private - BucketACL string `json:"bucketAcl,omitempty"` - // Chunk size to use for uploading. ChunkSize *string `json:"chunkSize,omitempty"` @@ -73,9 +66,6 @@ type StorageS3RcloneConfig struct { // Version of ListObjects to use: 1,2 or 0 for auto. ListVersion int64 `json:"listVersion,omitempty"` - // Location constraint - must be set to match the Region. - LocationConstraint string `json:"locationConstraint,omitempty"` - // Maximum number of parts in a multipart upload. MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` @@ -103,8 +93,17 @@ type StorageS3RcloneConfig struct { // Profile to use in the shared credentials file. Profile string `json:"profile,omitempty"` - // Region to connect to. - Region string `json:"region,omitempty"` + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` // Set to debug the SDK SdkLogMode *string `json:"sdkLogMode,omitempty"` @@ -118,6 +117,9 @@ type StorageS3RcloneConfig struct { // Path to the shared credentials file. 
SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` @@ -130,6 +132,12 @@ type StorageS3RcloneConfig struct { // Set if rclone should report BucketAlreadyExists errors on bucket creation. UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + // If true use AWS S3 dual-stack endpoint (IPv6 support). UseDualStack *bool `json:"useDualStack,omitempty"` @@ -145,6 +153,9 @@ type StorageS3RcloneConfig struct { // Whether to use an unsigned payload in PutObject UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` diff --git a/client/swagger/models/storage_s3_scaleway_config.go b/client/swagger/models/storage_s3_scaleway_config.go index 67291be61..a9b867831 100644 --- a/client/swagger/models/storage_s3_scaleway_config.go +++ b/client/swagger/models/storage_s3_scaleway_config.go @@ -54,7 +54,7 @@ type StorageS3ScalewayConfig struct { // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` - // Endpoint for Scaleway Object Storage. + // Endpoint for S3 API. // Example: s3.nl-ams.scw.cloud Endpoint string `json:"endpoint,omitempty"` @@ -105,6 +105,18 @@ type StorageS3ScalewayConfig struct { // Example: nl-ams Region string `json:"region,omitempty"` + // ARN of the IAM role to assume. 
+ RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + // Set to debug the SDK SdkLogMode *string `json:"sdkLogMode,omitempty"` @@ -117,6 +129,9 @@ type StorageS3ScalewayConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + // The storage class to use when storing new objects in S3. StorageClass string `json:"storageClass,omitempty"` @@ -132,6 +147,12 @@ type StorageS3ScalewayConfig struct { // Set if rclone should report BucketAlreadyExists errors on bucket creation. UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + // If true use AWS S3 dual-stack endpoint (IPv6 support). UseDualStack *bool `json:"useDualStack,omitempty"` @@ -147,6 +168,9 @@ type StorageS3ScalewayConfig struct { // Whether to use an unsigned payload in PutObject UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + // If true use v2 authentication. 
V2Auth *bool `json:"v2Auth,omitempty"` diff --git a/client/swagger/models/storage_s3_seaweed_f_s_config.go b/client/swagger/models/storage_s3_seaweed_f_s_config.go index 812f2f101..758a1f81d 100644 --- a/client/swagger/models/storage_s3_seaweed_f_s_config.go +++ b/client/swagger/models/storage_s3_seaweed_f_s_config.go @@ -107,6 +107,18 @@ type StorageS3SeaweedFSConfig struct { // Region to connect to. Region string `json:"region,omitempty"` + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + // Set to debug the SDK SdkLogMode *string `json:"sdkLogMode,omitempty"` @@ -119,6 +131,9 @@ type StorageS3SeaweedFSConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` @@ -131,6 +146,12 @@ type StorageS3SeaweedFSConfig struct { // Set if rclone should report BucketAlreadyExists errors on bucket creation. UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + // If true use AWS S3 dual-stack endpoint (IPv6 support). 
UseDualStack *bool `json:"useDualStack,omitempty"` @@ -146,6 +167,9 @@ type StorageS3SeaweedFSConfig struct { // Whether to use an unsigned payload in PutObject UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` diff --git a/client/swagger/models/storage_s3_selectel_config.go b/client/swagger/models/storage_s3_selectel_config.go new file mode 100644 index 000000000..9e2eaeaea --- /dev/null +++ b/client/swagger/models/storage_s3_selectel_config.go @@ -0,0 +1,203 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageS3SelectelConfig storage s3 selectel config +// +// swagger:model storage.s3SelectelConfig +type StorageS3SelectelConfig struct { + + // AWS Access Key ID. + AccessKeyID string `json:"accessKeyId,omitempty"` + + // Chunk size to use for uploading. + ChunkSize *string `json:"chunkSize,omitempty"` + + // Cutoff for switching to multipart copy. + CopyCutoff *string `json:"copyCutoff,omitempty"` + + // If set this will decompress gzip encoded objects. + Decompress *bool `json:"decompress,omitempty"` + + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + + // Don't store MD5 checksum with object metadata. + DisableChecksum *bool `json:"disableChecksum,omitempty"` + + // Disable usage of http2 for S3 backends. + DisableHttp2 *bool `json:"disableHttp2,omitempty"` + + // Custom endpoint for downloads. 
+ DownloadURL string `json:"downloadUrl,omitempty"` + + // The encoding for the backend. + Encoding *string `json:"encoding,omitempty"` + + // Endpoint for S3 API. + // Example: s3.ru-1.storage.selcloud.ru + Endpoint string `json:"endpoint,omitempty"` + + // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + // Example: false + EnvAuth *bool `json:"envAuth,omitempty"` + + // If true use path style access if false use virtual hosted style. + ForcePathStyle *bool `json:"forcePathStyle,omitempty"` + + // Size of listing chunk (response list for each ListObject S3 request). + ListChunk *int64 `json:"listChunk,omitempty"` + + // Whether to url encode listings: true/false/unset + ListURLEncode *string `json:"listUrlEncode,omitempty"` + + // Version of ListObjects to use: 1,2 or 0 for auto. + ListVersion int64 `json:"listVersion,omitempty"` + + // Maximum number of parts in a multipart upload. + MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` + + // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` + + // Whether to use mmap buffers in internal memory pool. (no longer used) + MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` + + // Set this if the backend might gzip objects. + MightGzip *string `json:"mightGzip,omitempty"` + + // If set, don't attempt to check the bucket exists or create it. + NoCheckBucket *bool `json:"noCheckBucket,omitempty"` + + // If set, don't HEAD uploaded objects to check integrity. + NoHead *bool `json:"noHead,omitempty"` + + // If set, do not do HEAD before GET when getting objects. + NoHeadObject *bool `json:"noHeadObject,omitempty"` + + // Suppress setting and reading of system metadata + NoSystemMetadata *bool `json:"noSystemMetadata,omitempty"` + + // Profile to use in the shared credentials file. + Profile string `json:"profile,omitempty"` + + // Region to connect to. 
+ // Example: ru-3 + Region string `json:"region,omitempty"` + + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + + // AWS Secret Access Key (password). + SecretAccessKey string `json:"secretAccessKey,omitempty"` + + // An AWS session token. + SessionToken string `json:"sessionToken,omitempty"` + + // Path to the shared credentials file. + SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + + // Concurrency for multipart uploads and copies. + UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` + + // Cutoff for switching to chunked upload. + UploadCutoff *string `json:"uploadCutoff,omitempty"` + + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). 
+ UseDualStack *bool `json:"useDualStack,omitempty"` + + // Whether to use ETag in multipart uploads for verification + UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + + // Set if rclone should use multipart uploads. + UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + + // Whether to use a presigned request or PutObject for single part uploads + UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + + // If true use v2 authentication. + V2Auth *bool `json:"v2Auth,omitempty"` + + // Show file versions as they were at the specified time. + VersionAt *string `json:"versionAt,omitempty"` + + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + + // Include old versions in directory listings. 
+ Versions *bool `json:"versions,omitempty"` +} + +// Validate validates this storage s3 selectel config +func (m *StorageS3SelectelConfig) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this storage s3 selectel config based on context it is used +func (m *StorageS3SelectelConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *StorageS3SelectelConfig) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageS3SelectelConfig) UnmarshalBinary(b []byte) error { + var res StorageS3SelectelConfig + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_s3_servercore_config.go b/client/swagger/models/storage_s3_servercore_config.go new file mode 100644 index 000000000..bf762b868 --- /dev/null +++ b/client/swagger/models/storage_s3_servercore_config.go @@ -0,0 +1,207 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageS3ServercoreConfig storage s3 servercore config +// +// swagger:model storage.s3ServercoreConfig +type StorageS3ServercoreConfig struct { + + // AWS Access Key ID. + AccessKeyID string `json:"accessKeyId,omitempty"` + + // Canned ACL used when creating buckets. + // Example: private + BucketACL string `json:"bucketAcl,omitempty"` + + // Chunk size to use for uploading. + ChunkSize *string `json:"chunkSize,omitempty"` + + // Cutoff for switching to multipart copy. + CopyCutoff *string `json:"copyCutoff,omitempty"` + + // If set this will decompress gzip encoded objects. 
+ Decompress *bool `json:"decompress,omitempty"` + + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + + // Don't store MD5 checksum with object metadata. + DisableChecksum *bool `json:"disableChecksum,omitempty"` + + // Disable usage of http2 for S3 backends. + DisableHttp2 *bool `json:"disableHttp2,omitempty"` + + // Custom endpoint for downloads. + DownloadURL string `json:"downloadUrl,omitempty"` + + // The encoding for the backend. + Encoding *string `json:"encoding,omitempty"` + + // Endpoint for S3 API. + // Example: s3.ru-1.storage.selcloud.ru + Endpoint string `json:"endpoint,omitempty"` + + // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + // Example: false + EnvAuth *bool `json:"envAuth,omitempty"` + + // If true use path style access if false use virtual hosted style. + ForcePathStyle *bool `json:"forcePathStyle,omitempty"` + + // Size of listing chunk (response list for each ListObject S3 request). + ListChunk *int64 `json:"listChunk,omitempty"` + + // Whether to url encode listings: true/false/unset + ListURLEncode *string `json:"listUrlEncode,omitempty"` + + // Version of ListObjects to use: 1,2 or 0 for auto. + ListVersion int64 `json:"listVersion,omitempty"` + + // Maximum number of parts in a multipart upload. + MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` + + // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` + + // Whether to use mmap buffers in internal memory pool. (no longer used) + MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` + + // Set this if the backend might gzip objects. + MightGzip *string `json:"mightGzip,omitempty"` + + // If set, don't attempt to check the bucket exists or create it. 
+ NoCheckBucket *bool `json:"noCheckBucket,omitempty"` + + // If set, don't HEAD uploaded objects to check integrity. + NoHead *bool `json:"noHead,omitempty"` + + // If set, do not do HEAD before GET when getting objects. + NoHeadObject *bool `json:"noHeadObject,omitempty"` + + // Suppress setting and reading of system metadata + NoSystemMetadata *bool `json:"noSystemMetadata,omitempty"` + + // Profile to use in the shared credentials file. + Profile string `json:"profile,omitempty"` + + // Region to connect to. + // Example: uz-2 + Region string `json:"region,omitempty"` + + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + + // AWS Secret Access Key (password). + SecretAccessKey string `json:"secretAccessKey,omitempty"` + + // An AWS session token. + SessionToken string `json:"sessionToken,omitempty"` + + // Path to the shared credentials file. + SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + + // Concurrency for multipart uploads and copies. + UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` + + // Cutoff for switching to chunked upload. + UploadCutoff *string `json:"uploadCutoff,omitempty"` + + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. 
+ UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + + // Whether to use ETag in multipart uploads for verification + UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + + // Set if rclone should use multipart uploads. + UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + + // Whether to use a presigned request or PutObject for single part uploads + UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + + // If true use v2 authentication. + V2Auth *bool `json:"v2Auth,omitempty"` + + // Show file versions as they were at the specified time. + VersionAt *string `json:"versionAt,omitempty"` + + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + + // Include old versions in directory listings. 
+ Versions *bool `json:"versions,omitempty"` +} + +// Validate validates this storage s3 servercore config +func (m *StorageS3ServercoreConfig) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this storage s3 servercore config based on context it is used +func (m *StorageS3ServercoreConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *StorageS3ServercoreConfig) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageS3ServercoreConfig) UnmarshalBinary(b []byte) error { + var res StorageS3ServercoreConfig + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_s3_spectra_logic_config.go b/client/swagger/models/storage_s3_spectra_logic_config.go new file mode 100644 index 000000000..af7716b35 --- /dev/null +++ b/client/swagger/models/storage_s3_spectra_logic_config.go @@ -0,0 +1,198 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageS3SpectraLogicConfig storage s3 spectra logic config +// +// swagger:model storage.s3SpectraLogicConfig +type StorageS3SpectraLogicConfig struct { + + // AWS Access Key ID. + AccessKeyID string `json:"accessKeyId,omitempty"` + + // Chunk size to use for uploading. + ChunkSize *string `json:"chunkSize,omitempty"` + + // Cutoff for switching to multipart copy. + CopyCutoff *string `json:"copyCutoff,omitempty"` + + // If set this will decompress gzip encoded objects. + Decompress *bool `json:"decompress,omitempty"` + + // Description of the remote. 
+ Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + + // Don't store MD5 checksum with object metadata. + DisableChecksum *bool `json:"disableChecksum,omitempty"` + + // Disable usage of http2 for S3 backends. + DisableHttp2 *bool `json:"disableHttp2,omitempty"` + + // Custom endpoint for downloads. + DownloadURL string `json:"downloadUrl,omitempty"` + + // The encoding for the backend. + Encoding *string `json:"encoding,omitempty"` + + // Endpoint for S3 API. + Endpoint string `json:"endpoint,omitempty"` + + // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + // Example: false + EnvAuth *bool `json:"envAuth,omitempty"` + + // If true use path style access if false use virtual hosted style. + ForcePathStyle *bool `json:"forcePathStyle,omitempty"` + + // Size of listing chunk (response list for each ListObject S3 request). + ListChunk *int64 `json:"listChunk,omitempty"` + + // Whether to url encode listings: true/false/unset + ListURLEncode *string `json:"listUrlEncode,omitempty"` + + // Version of ListObjects to use: 1,2 or 0 for auto. + ListVersion int64 `json:"listVersion,omitempty"` + + // Maximum number of parts in a multipart upload. + MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` + + // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` + + // Whether to use mmap buffers in internal memory pool. (no longer used) + MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` + + // Set this if the backend might gzip objects. + MightGzip *string `json:"mightGzip,omitempty"` + + // If set, don't attempt to check the bucket exists or create it. + NoCheckBucket *bool `json:"noCheckBucket,omitempty"` + + // If set, don't HEAD uploaded objects to check integrity. 
+ NoHead *bool `json:"noHead,omitempty"` + + // If set, do not do HEAD before GET when getting objects. + NoHeadObject *bool `json:"noHeadObject,omitempty"` + + // Suppress setting and reading of system metadata + NoSystemMetadata *bool `json:"noSystemMetadata,omitempty"` + + // Profile to use in the shared credentials file. + Profile string `json:"profile,omitempty"` + + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + + // AWS Secret Access Key (password). + SecretAccessKey string `json:"secretAccessKey,omitempty"` + + // An AWS session token. + SessionToken string `json:"sessionToken,omitempty"` + + // Path to the shared credentials file. + SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + + // Concurrency for multipart uploads and copies. + UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` + + // Cutoff for switching to chunked upload. + UploadCutoff *string `json:"uploadCutoff,omitempty"` + + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. 
+ UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + + // Whether to use ETag in multipart uploads for verification + UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + + // Set if rclone should use multipart uploads. + UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + + // Whether to use a presigned request or PutObject for single part uploads + UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + + // If true use v2 authentication. + V2Auth *bool `json:"v2Auth,omitempty"` + + // Show file versions as they were at the specified time. + VersionAt *string `json:"versionAt,omitempty"` + + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + + // Include old versions in directory listings. 
+ Versions *bool `json:"versions,omitempty"` +} + +// Validate validates this storage s3 spectra logic config +func (m *StorageS3SpectraLogicConfig) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this storage s3 spectra logic config based on context it is used +func (m *StorageS3SpectraLogicConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *StorageS3SpectraLogicConfig) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageS3SpectraLogicConfig) UnmarshalBinary(b []byte) error { + var res StorageS3SpectraLogicConfig + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_s3_stack_path_config.go b/client/swagger/models/storage_s3_stack_path_config.go index 48d24a019..39b6fd0f1 100644 --- a/client/swagger/models/storage_s3_stack_path_config.go +++ b/client/swagger/models/storage_s3_stack_path_config.go @@ -54,7 +54,7 @@ type StorageS3StackPathConfig struct { // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` - // Endpoint for StackPath Object Storage. + // Endpoint for S3 API. // Example: s3.us-east-2.stackpathstorage.com Endpoint string `json:"endpoint,omitempty"` @@ -104,6 +104,18 @@ type StorageS3StackPathConfig struct { // Region to connect to. Region string `json:"region,omitempty"` + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. 
+ RoleSessionName string `json:"roleSessionName,omitempty"` + // Set to debug the SDK SdkLogMode *string `json:"sdkLogMode,omitempty"` @@ -116,6 +128,9 @@ type StorageS3StackPathConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` @@ -128,6 +143,12 @@ type StorageS3StackPathConfig struct { // Set if rclone should report BucketAlreadyExists errors on bucket creation. UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + // If true use AWS S3 dual-stack endpoint (IPv6 support). UseDualStack *bool `json:"useDualStack,omitempty"` @@ -143,6 +164,9 @@ type StorageS3StackPathConfig struct { // Whether to use an unsigned payload in PutObject UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` diff --git a/client/swagger/models/storage_s3_storj_config.go b/client/swagger/models/storage_s3_storj_config.go index fbfc4fb3c..840c63382 100644 --- a/client/swagger/models/storage_s3_storj_config.go +++ b/client/swagger/models/storage_s3_storj_config.go @@ -20,10 +20,6 @@ type StorageS3StorjConfig struct { // AWS Access Key ID. AccessKeyID string `json:"accessKeyId,omitempty"` - // Canned ACL used when creating buckets. - // Example: private - BucketACL string `json:"bucketAcl,omitempty"` - // Chunk size to use for uploading. 
ChunkSize *string `json:"chunkSize,omitempty"` @@ -51,7 +47,7 @@ type StorageS3StorjConfig struct { // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` - // Endpoint for Storj Gateway. + // Endpoint for S3 API. // Example: gateway.storjshare.io Endpoint string `json:"endpoint,omitempty"` @@ -98,6 +94,18 @@ type StorageS3StorjConfig struct { // Profile to use in the shared credentials file. Profile string `json:"profile,omitempty"` + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + // Set to debug the SDK SdkLogMode *string `json:"sdkLogMode,omitempty"` @@ -110,6 +118,9 @@ type StorageS3StorjConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` @@ -122,6 +133,12 @@ type StorageS3StorjConfig struct { // Set if rclone should report BucketAlreadyExists errors on bucket creation. UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + // If true use AWS S3 dual-stack endpoint (IPv6 support). 
UseDualStack *bool `json:"useDualStack,omitempty"` @@ -137,6 +154,9 @@ type StorageS3StorjConfig struct { // Whether to use an unsigned payload in PutObject UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` diff --git a/client/swagger/models/storage_s3_synology_config.go b/client/swagger/models/storage_s3_synology_config.go index 29b2a083b..172a5778d 100644 --- a/client/swagger/models/storage_s3_synology_config.go +++ b/client/swagger/models/storage_s3_synology_config.go @@ -20,10 +20,6 @@ type StorageS3SynologyConfig struct { // AWS Access Key ID. AccessKeyID string `json:"accessKeyId,omitempty"` - // Canned ACL used when creating buckets. - // Example: private - BucketACL string `json:"bucketAcl,omitempty"` - // Chunk size to use for uploading. ChunkSize *string `json:"chunkSize,omitempty"` @@ -51,7 +47,7 @@ type StorageS3SynologyConfig struct { // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` - // Endpoint for Synology C2 Object Storage API. + // Endpoint for S3 API. // Example: eu-001.s3.synologyc2.net Endpoint string `json:"endpoint,omitempty"` @@ -101,10 +97,22 @@ type StorageS3SynologyConfig struct { // Profile to use in the shared credentials file. Profile string `json:"profile,omitempty"` - // Region where your data stored. + // Region to connect to. // Example: eu-001 Region string `json:"region,omitempty"` + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. 
+ RoleSessionName string `json:"roleSessionName,omitempty"` + // Set to debug the SDK SdkLogMode *string `json:"sdkLogMode,omitempty"` @@ -117,6 +125,9 @@ type StorageS3SynologyConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` @@ -129,6 +140,12 @@ type StorageS3SynologyConfig struct { // Set if rclone should report BucketAlreadyExists errors on bucket creation. UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + // If true use AWS S3 dual-stack endpoint (IPv6 support). UseDualStack *bool `json:"useDualStack,omitempty"` @@ -144,6 +161,9 @@ type StorageS3SynologyConfig struct { // Whether to use an unsigned payload in PutObject UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` diff --git a/client/swagger/models/storage_s3_tencent_c_o_s_config.go b/client/swagger/models/storage_s3_tencent_c_o_s_config.go index 3eec345b5..1af96224f 100644 --- a/client/swagger/models/storage_s3_tencent_c_o_s_config.go +++ b/client/swagger/models/storage_s3_tencent_c_o_s_config.go @@ -55,7 +55,7 @@ type StorageS3TencentCOSConfig struct { // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` - // Endpoint for Tencent COS API. + // Endpoint for S3 API. 
// Example: cos.ap-beijing.myqcloud.com Endpoint string `json:"endpoint,omitempty"` @@ -102,6 +102,18 @@ type StorageS3TencentCOSConfig struct { // Profile to use in the shared credentials file. Profile string `json:"profile,omitempty"` + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + // Set to debug the SDK SdkLogMode *string `json:"sdkLogMode,omitempty"` @@ -114,7 +126,11 @@ type StorageS3TencentCOSConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` - // The storage class to use when storing new objects in Tencent COS. + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + + // The storage class to use when storing new objects in S3. + // Example: ARCHIVE StorageClass string `json:"storageClass,omitempty"` // Concurrency for multipart uploads and copies. @@ -129,6 +145,12 @@ type StorageS3TencentCOSConfig struct { // Set if rclone should report BucketAlreadyExists errors on bucket creation. UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + // If true use AWS S3 dual-stack endpoint (IPv6 support). 
UseDualStack *bool `json:"useDualStack,omitempty"` @@ -144,6 +166,9 @@ type StorageS3TencentCOSConfig struct { // Whether to use an unsigned payload in PutObject UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` diff --git a/client/swagger/models/storage_s3_wasabi_config.go b/client/swagger/models/storage_s3_wasabi_config.go index b2c6ef7c1..b076eb1e8 100644 --- a/client/swagger/models/storage_s3_wasabi_config.go +++ b/client/swagger/models/storage_s3_wasabi_config.go @@ -107,6 +107,18 @@ type StorageS3WasabiConfig struct { // Region to connect to. Region string `json:"region,omitempty"` + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + // Set to debug the SDK SdkLogMode *string `json:"sdkLogMode,omitempty"` @@ -119,6 +131,9 @@ type StorageS3WasabiConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` @@ -131,6 +146,12 @@ type StorageS3WasabiConfig struct { // Set if rclone should report BucketAlreadyExists errors on bucket creation. UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + // If true, enables arn region support for the service. 
+ UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + // If true use AWS S3 dual-stack endpoint (IPv6 support). UseDualStack *bool `json:"useDualStack,omitempty"` @@ -146,6 +167,9 @@ type StorageS3WasabiConfig struct { // Whether to use an unsigned payload in PutObject UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` diff --git a/client/swagger/models/storage_s3_zata_config.go b/client/swagger/models/storage_s3_zata_config.go new file mode 100644 index 000000000..f12b21943 --- /dev/null +++ b/client/swagger/models/storage_s3_zata_config.go @@ -0,0 +1,213 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageS3ZataConfig storage s3 zata config +// +// swagger:model storage.s3ZataConfig +type StorageS3ZataConfig struct { + + // AWS Access Key ID. + AccessKeyID string `json:"accessKeyId,omitempty"` + + // Canned ACL used when creating buckets and storing or copying objects. + ACL string `json:"acl,omitempty"` + + // Canned ACL used when creating buckets. + // Example: private + BucketACL string `json:"bucketAcl,omitempty"` + + // Chunk size to use for uploading. + ChunkSize *string `json:"chunkSize,omitempty"` + + // Cutoff for switching to multipart copy. + CopyCutoff *string `json:"copyCutoff,omitempty"` + + // If set this will decompress gzip encoded objects. + Decompress *bool `json:"decompress,omitempty"` + + // Description of the remote. 
+ Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + + // Don't store MD5 checksum with object metadata. + DisableChecksum *bool `json:"disableChecksum,omitempty"` + + // Disable usage of http2 for S3 backends. + DisableHttp2 *bool `json:"disableHttp2,omitempty"` + + // Custom endpoint for downloads. + DownloadURL string `json:"downloadUrl,omitempty"` + + // The encoding for the backend. + Encoding *string `json:"encoding,omitempty"` + + // Endpoint for S3 API. + // Example: idr01.zata.ai + Endpoint string `json:"endpoint,omitempty"` + + // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + // Example: false + EnvAuth *bool `json:"envAuth,omitempty"` + + // If true use path style access if false use virtual hosted style. + ForcePathStyle *bool `json:"forcePathStyle,omitempty"` + + // Size of listing chunk (response list for each ListObject S3 request). + ListChunk *int64 `json:"listChunk,omitempty"` + + // Whether to url encode listings: true/false/unset + ListURLEncode *string `json:"listUrlEncode,omitempty"` + + // Version of ListObjects to use: 1,2 or 0 for auto. + ListVersion int64 `json:"listVersion,omitempty"` + + // Location constraint - must be set to match the Region. + LocationConstraint string `json:"locationConstraint,omitempty"` + + // Maximum number of parts in a multipart upload. + MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` + + // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` + + // Whether to use mmap buffers in internal memory pool. (no longer used) + MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` + + // Set this if the backend might gzip objects. 
+ MightGzip *string `json:"mightGzip,omitempty"` + + // If set, don't attempt to check the bucket exists or create it. + NoCheckBucket *bool `json:"noCheckBucket,omitempty"` + + // If set, don't HEAD uploaded objects to check integrity. + NoHead *bool `json:"noHead,omitempty"` + + // If set, do not do HEAD before GET when getting objects. + NoHeadObject *bool `json:"noHeadObject,omitempty"` + + // Suppress setting and reading of system metadata + NoSystemMetadata *bool `json:"noSystemMetadata,omitempty"` + + // Profile to use in the shared credentials file. + Profile string `json:"profile,omitempty"` + + // Region to connect to. + // Example: us-east-1 + Region string `json:"region,omitempty"` + + // ARN of the IAM role to assume. + RoleArn string `json:"roleArn,omitempty"` + + // External ID for assumed role. + RoleExternalID string `json:"roleExternalId,omitempty"` + + // Session duration for assumed role. + RoleSessionDuration string `json:"roleSessionDuration,omitempty"` + + // Session name for assumed role. + RoleSessionName string `json:"roleSessionName,omitempty"` + + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + + // AWS Secret Access Key (password). + SecretAccessKey string `json:"secretAccessKey,omitempty"` + + // An AWS session token. + SessionToken string `json:"sessionToken,omitempty"` + + // Path to the shared credentials file. + SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + + // Set if rclone should include Accept-Encoding as part of the signature. + SignAcceptEncoding *string `json:"signAcceptEncoding,omitempty"` + + // Concurrency for multipart uploads and copies. + UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` + + // Cutoff for switching to chunked upload. + UploadCutoff *string `json:"uploadCutoff,omitempty"` + + // Whether to send `Accept-Encoding: gzip` header. 
+ UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true, enables arn region support for the service. + UseArnRegion *bool `json:"useArnRegion,omitempty"` + + // If true use AWS S3 data integrity protections. + UseDataIntegrityProtections *string `json:"useDataIntegrityProtections,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + + // Whether to use ETag in multipart uploads for verification + UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + + // Set if rclone should use multipart uploads. + UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + + // Whether to use a presigned request or PutObject for single part uploads + UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + + // Set if rclone should add x-id URL parameters. + UseXID *string `json:"useXId,omitempty"` + + // If true use v2 authentication. + V2Auth *bool `json:"v2Auth,omitempty"` + + // Show file versions as they were at the specified time. + VersionAt *string `json:"versionAt,omitempty"` + + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + + // Include old versions in directory listings. 
+ Versions *bool `json:"versions,omitempty"` +} + +// Validate validates this storage s3 zata config +func (m *StorageS3ZataConfig) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this storage s3 zata config based on context it is used +func (m *StorageS3ZataConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *StorageS3ZataConfig) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageS3ZataConfig) UnmarshalBinary(b []byte) error { + var res StorageS3ZataConfig + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_sftp_config.go b/client/swagger/models/storage_sftp_config.go index cd21de959..8efdfd051 100644 --- a/client/swagger/models/storage_sftp_config.go +++ b/client/swagger/models/storage_sftp_config.go @@ -20,6 +20,9 @@ type StorageSftpConfig struct { // Allow asking for SFTP password when needed. AskPassword *bool `json:"askPassword,omitempty"` + // The command used to read BLAKE3 hashes. + Blake3sumCommand string `json:"blake3sumCommand,omitempty"` + // Upload and download chunk size. ChunkSize *string `json:"chunkSize,omitempty"` @@ -35,6 +38,9 @@ type StorageSftpConfig struct { // Set to enable server side copies using hardlinks. CopyIsHardlink *bool `json:"copyIsHardlink,omitempty"` + // The command used to read CRC-32 hashes. + Crc32sumCommand string `json:"crc32sumCommand,omitempty"` + // Description of the remote. Description string `json:"description,omitempty"` @@ -47,12 +53,18 @@ type StorageSftpConfig struct { // Disable the execution of SSH commands to determine if remote file hashing is available. DisableHashcheck *bool `json:"disableHashcheck,omitempty"` + // Comma separated list of supported checksum types. 
+ Hashes string `json:"hashes,omitempty"` + // SSH host to connect to. Host string `json:"host,omitempty"` // Space separated list of host key algorithms, ordered by preference. HostKeyAlgorithms string `json:"hostKeyAlgorithms,omitempty"` + // URL for HTTP CONNECT proxy + HTTPProxy string `json:"httpProxy,omitempty"` + // Max time before closing idle connections. IdleTimeout *string `json:"idleTimeout,omitempty"` @@ -78,7 +90,7 @@ type StorageSftpConfig struct { // Space separated list of MACs (message authentication code) algorithms, ordered by preference. Macs string `json:"macs,omitempty"` - // The command used to read md5 hashes. + // The command used to read MD5 hashes. Md5sumCommand string `json:"md5sumCommand,omitempty"` // SSH password, leave blank to use ssh-agent. @@ -90,6 +102,9 @@ type StorageSftpConfig struct { // SSH port number. Port *int64 `json:"port,omitempty"` + // SSH public certificate for public certificate based authentication. + Pubkey string `json:"pubkey,omitempty"` + // Optional path to public key file. PubkeyFile string `json:"pubkeyFile,omitempty"` @@ -102,9 +117,12 @@ type StorageSftpConfig struct { // Set the modified time on the remote if set. SetModtime *bool `json:"setModtime,omitempty"` - // The command used to read sha1 hashes. + // The command used to read SHA-1 hashes. Sha1sumCommand string `json:"sha1sumCommand,omitempty"` + // The command used to read SHA-256 hashes. + Sha256sumCommand string `json:"sha256sumCommand,omitempty"` + // The type of SSH shell on remote server, if any. // Example: none ShellType string `json:"shellType,omitempty"` @@ -130,6 +148,12 @@ type StorageSftpConfig struct { // SSH username. User *string `json:"user,omitempty"` + + // The command used to read XXH128 hashes. + Xxh128sumCommand string `json:"xxh128sumCommand,omitempty"` + + // The command used to read XXH3 hashes. 
+ Xxh3sumCommand string `json:"xxh3sumCommand,omitempty"` } // Validate validates this storage sftp config diff --git a/client/swagger/models/storage_sharefile_config.go b/client/swagger/models/storage_sharefile_config.go index e608f8f52..382ebe3e1 100644 --- a/client/swagger/models/storage_sharefile_config.go +++ b/client/swagger/models/storage_sharefile_config.go @@ -23,6 +23,9 @@ type StorageSharefileConfig struct { // Upload chunk size. ChunkSize *string `json:"chunkSize,omitempty"` + // Use client credentials OAuth flow. + ClientCredentials *bool `json:"clientCredentials,omitempty"` + // OAuth Client Id. ClientID string `json:"clientId,omitempty"` diff --git a/client/swagger/models/storage_smb_config.go b/client/swagger/models/storage_smb_config.go index 3170bfffb..6b96b0114 100644 --- a/client/swagger/models/storage_smb_config.go +++ b/client/swagger/models/storage_smb_config.go @@ -38,6 +38,9 @@ type StorageSmbConfig struct { // Max time before closing idle connections. IdleTimeout *string `json:"idleTimeout,omitempty"` + // Path to the Kerberos credential cache (krb5cc). + KerberosCcache string `json:"kerberosCcache,omitempty"` + // SMB password. Pass string `json:"pass,omitempty"` @@ -47,6 +50,9 @@ type StorageSmbConfig struct { // Service principal name. Spn string `json:"spn,omitempty"` + // Use Kerberos authentication. + UseKerberos *bool `json:"useKerberos,omitempty"` + // SMB username. User *string `json:"user,omitempty"` } diff --git a/client/swagger/models/storage_uptobox_config.go b/client/swagger/models/storage_uptobox_config.go deleted file mode 100644 index 8bab7e892..000000000 --- a/client/swagger/models/storage_uptobox_config.go +++ /dev/null @@ -1,59 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" -) - -// StorageUptoboxConfig storage uptobox config -// -// swagger:model storage.uptoboxConfig -type StorageUptoboxConfig struct { - - // Your access token. - AccessToken string `json:"accessToken,omitempty"` - - // Description of the remote. - Description string `json:"description,omitempty"` - - // The encoding for the backend. - Encoding *string `json:"encoding,omitempty"` - - // Set to make uploaded files private - Private *bool `json:"private,omitempty"` -} - -// Validate validates this storage uptobox config -func (m *StorageUptoboxConfig) Validate(formats strfmt.Registry) error { - return nil -} - -// ContextValidate validates this storage uptobox config based on context it is used -func (m *StorageUptoboxConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *StorageUptoboxConfig) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *StorageUptoboxConfig) UnmarshalBinary(b []byte) error { - var res StorageUptoboxConfig - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/client/swagger/models/storage_webdav_config.go b/client/swagger/models/storage_webdav_config.go index b6c3d1dfc..be2b2f703 100644 --- a/client/swagger/models/storage_webdav_config.go +++ b/client/swagger/models/storage_webdav_config.go @@ -17,6 +17,9 @@ import ( // swagger:model storage.webdavConfig type StorageWebdavConfig struct { + // Preserve authentication on redirect. + AuthRedirect *bool `json:"authRedirect,omitempty"` + // Bearer token instead of user/pass (e.g. a Macaroon). 
BearerToken string `json:"bearerToken,omitempty"` diff --git a/client/swagger/models/storage_yandex_config.go b/client/swagger/models/storage_yandex_config.go index bc5eadba2..724889196 100644 --- a/client/swagger/models/storage_yandex_config.go +++ b/client/swagger/models/storage_yandex_config.go @@ -20,6 +20,9 @@ type StorageYandexConfig struct { // Auth server URL. AuthURL string `json:"authUrl,omitempty"` + // Use client credentials OAuth flow. + ClientCredentials *bool `json:"clientCredentials,omitempty"` + // OAuth Client Id. ClientID string `json:"clientId,omitempty"` diff --git a/client/swagger/models/storage_zoho_config.go b/client/swagger/models/storage_zoho_config.go index a76637cd8..04857cc2a 100644 --- a/client/swagger/models/storage_zoho_config.go +++ b/client/swagger/models/storage_zoho_config.go @@ -20,6 +20,9 @@ type StorageZohoConfig struct { // Auth server URL. AuthURL string `json:"authUrl,omitempty"` + // Use client credentials OAuth flow. + ClientCredentials *bool `json:"clientCredentials,omitempty"` + // OAuth Client Id. ClientID string `json:"clientId,omitempty"` @@ -41,6 +44,9 @@ type StorageZohoConfig struct { // Token server url. TokenURL string `json:"tokenUrl,omitempty"` + + // Cutoff for switching to large file upload api (>= 10 MiB). 
+ UploadCutoff *string `json:"uploadCutoff,omitempty"` } // Validate validates this storage zoho config diff --git a/cmd/run/contentprovider.go b/cmd/run/contentprovider.go index f6026ba15..c9c75e02a 100644 --- a/cmd/run/contentprovider.go +++ b/cmd/run/contentprovider.go @@ -31,22 +31,10 @@ var ContentProviderCmd = &cli.Command{ Value: true, }, &cli.BoolFlag{ - Category: "Bitswap Retrieval", - Name: "enable-bitswap", - Usage: "Enable bitswap retrieval", - Value: false, - }, - &cli.StringFlag{ - Category: "Bitswap Retrieval", - Name: "libp2p-identity-key", - Usage: "The base64 encoded private key for libp2p peer", - Value: "", - DefaultText: "AutoGenerated", - }, - &cli.StringSliceFlag{ - Category: "Bitswap Retrieval", - Name: "libp2p-listen", - Usage: "Addresses to listen on for libp2p connections", + Category: "HTTP IPFS Gateway", + Name: "enable-http-ipfs", + Usage: "Enable trustless IPFS gateway on /ipfs/", + Value: true, }, }, Action: func(c *cli.Context) error { @@ -60,13 +48,9 @@ var ContentProviderCmd = &cli.Command{ HTTP: contentprovider.HTTPConfig{ EnablePiece: c.Bool("enable-http-piece"), EnablePieceMetadata: c.Bool("enable-http-piece-metadata"), + EnableIPFS: c.Bool("enable-http-ipfs"), Bind: c.String("http-bind"), }, - Bitswap: contentprovider.BitswapConfig{ - Enable: c.Bool("enable-bitswap"), - IdentityKey: c.String("libp2p-identity-key"), - ListenMultiAddrs: c.StringSlice("libp2p-listen"), - }, } s, err := contentprovider.NewService(db, config) diff --git a/docs/en/SUMMARY.md b/docs/en/SUMMARY.md index f02be6ee7..0dd2ae999 100644 --- a/docs/en/SUMMARY.md +++ b/docs/en/SUMMARY.md @@ -108,35 +108,50 @@ * [Aws](cli-reference/storage/create/s3/aws.md) * [Alibaba](cli-reference/storage/create/s3/alibaba.md) * [Arvancloud](cli-reference/storage/create/s3/arvancloud.md) + * [Bizflycloud](cli-reference/storage/create/s3/bizflycloud.md) * [Ceph](cli-reference/storage/create/s3/ceph.md) * [Chinamobile](cli-reference/storage/create/s3/chinamobile.md) * 
[Cloudflare](cli-reference/storage/create/s3/cloudflare.md) + * [Cubbit](cli-reference/storage/create/s3/cubbit.md) * [Digitalocean](cli-reference/storage/create/s3/digitalocean.md) * [Dreamhost](cli-reference/storage/create/s3/dreamhost.md) + * [Exaba](cli-reference/storage/create/s3/exaba.md) + * [Filelu](cli-reference/storage/create/s3/filelu.md) + * [Flashblade](cli-reference/storage/create/s3/flashblade.md) * [Google Cloud Storage](cli-reference/storage/create/s3/gcs.md) + * [Hetzner](cli-reference/storage/create/s3/hetzner.md) * [Huaweiobs](cli-reference/storage/create/s3/huaweiobs.md) * [Ibmcos](cli-reference/storage/create/s3/ibmcos.md) * [Idrive](cli-reference/storage/create/s3/idrive.md) * [Ionos](cli-reference/storage/create/s3/ionos.md) + * [Intercolo](cli-reference/storage/create/s3/intercolo.md) * [Leviia](cli-reference/storage/create/s3/leviia.md) * [Liara](cli-reference/storage/create/s3/liara.md) * [Linode](cli-reference/storage/create/s3/linode.md) * [Lyvecloud](cli-reference/storage/create/s3/lyvecloud.md) * [Magalu](cli-reference/storage/create/s3/magalu.md) + * [Mega](cli-reference/storage/create/s3/mega.md) * [Minio](cli-reference/storage/create/s3/minio.md) * [Netease](cli-reference/storage/create/s3/netease.md) + * [Ovhcloud](cli-reference/storage/create/s3/ovhcloud.md) * [Other](cli-reference/storage/create/s3/other.md) + * [Outscale](cli-reference/storage/create/s3/outscale.md) * [Petabox](cli-reference/storage/create/s3/petabox.md) * [Qiniu](cli-reference/storage/create/s3/qiniu.md) + * [Rabata](cli-reference/storage/create/s3/rabata.md) * [Rackcorp](cli-reference/storage/create/s3/rackcorp.md) * [Rclone](cli-reference/storage/create/s3/rclone.md) * [Scaleway](cli-reference/storage/create/s3/scaleway.md) * [Seaweedfs](cli-reference/storage/create/s3/seaweedfs.md) + * [Selectel](cli-reference/storage/create/s3/selectel.md) + * [Servercore](cli-reference/storage/create/s3/servercore.md) + * 
[Spectralogic](cli-reference/storage/create/s3/spectralogic.md) * [Stackpath](cli-reference/storage/create/s3/stackpath.md) * [Storj](cli-reference/storage/create/s3/storj.md) * [Synology](cli-reference/storage/create/s3/synology.md) * [Tencentcos](cli-reference/storage/create/s3/tencentcos.md) * [Wasabi](cli-reference/storage/create/s3/wasabi.md) + * [Zata](cli-reference/storage/create/s3/zata.md) * [Seafile](cli-reference/storage/create/seafile.md) * [Sftp](cli-reference/storage/create/sftp.md) * [Sharefile](cli-reference/storage/create/sharefile.md) @@ -148,7 +163,6 @@ * [Sugarsync](cli-reference/storage/create/sugarsync.md) * [Swift](cli-reference/storage/create/swift.md) * [Union](cli-reference/storage/create/union.md) - * [Uptobox](cli-reference/storage/create/uptobox.md) * [Webdav](cli-reference/storage/create/webdav.md) * [Yandex](cli-reference/storage/create/yandex.md) * [Zoho](cli-reference/storage/create/zoho.md) @@ -196,35 +210,50 @@ * [Aws](cli-reference/storage/update/s3/aws.md) * [Alibaba](cli-reference/storage/update/s3/alibaba.md) * [Arvancloud](cli-reference/storage/update/s3/arvancloud.md) + * [Bizflycloud](cli-reference/storage/update/s3/bizflycloud.md) * [Ceph](cli-reference/storage/update/s3/ceph.md) * [Chinamobile](cli-reference/storage/update/s3/chinamobile.md) * [Cloudflare](cli-reference/storage/update/s3/cloudflare.md) + * [Cubbit](cli-reference/storage/update/s3/cubbit.md) * [Digitalocean](cli-reference/storage/update/s3/digitalocean.md) * [Dreamhost](cli-reference/storage/update/s3/dreamhost.md) + * [Exaba](cli-reference/storage/update/s3/exaba.md) + * [Filelu](cli-reference/storage/update/s3/filelu.md) + * [Flashblade](cli-reference/storage/update/s3/flashblade.md) * [Google Cloud Storage](cli-reference/storage/update/s3/gcs.md) + * [Hetzner](cli-reference/storage/update/s3/hetzner.md) * [Huaweiobs](cli-reference/storage/update/s3/huaweiobs.md) * [Ibmcos](cli-reference/storage/update/s3/ibmcos.md) * 
[Idrive](cli-reference/storage/update/s3/idrive.md) * [Ionos](cli-reference/storage/update/s3/ionos.md) + * [Intercolo](cli-reference/storage/update/s3/intercolo.md) * [Leviia](cli-reference/storage/update/s3/leviia.md) * [Liara](cli-reference/storage/update/s3/liara.md) * [Linode](cli-reference/storage/update/s3/linode.md) * [Lyvecloud](cli-reference/storage/update/s3/lyvecloud.md) * [Magalu](cli-reference/storage/update/s3/magalu.md) + * [Mega](cli-reference/storage/update/s3/mega.md) * [Minio](cli-reference/storage/update/s3/minio.md) * [Netease](cli-reference/storage/update/s3/netease.md) + * [Ovhcloud](cli-reference/storage/update/s3/ovhcloud.md) * [Other](cli-reference/storage/update/s3/other.md) + * [Outscale](cli-reference/storage/update/s3/outscale.md) * [Petabox](cli-reference/storage/update/s3/petabox.md) * [Qiniu](cli-reference/storage/update/s3/qiniu.md) + * [Rabata](cli-reference/storage/update/s3/rabata.md) * [Rackcorp](cli-reference/storage/update/s3/rackcorp.md) * [Rclone](cli-reference/storage/update/s3/rclone.md) * [Scaleway](cli-reference/storage/update/s3/scaleway.md) * [Seaweedfs](cli-reference/storage/update/s3/seaweedfs.md) + * [Selectel](cli-reference/storage/update/s3/selectel.md) + * [Servercore](cli-reference/storage/update/s3/servercore.md) + * [Spectralogic](cli-reference/storage/update/s3/spectralogic.md) * [Stackpath](cli-reference/storage/update/s3/stackpath.md) * [Storj](cli-reference/storage/update/s3/storj.md) * [Synology](cli-reference/storage/update/s3/synology.md) * [Tencentcos](cli-reference/storage/update/s3/tencentcos.md) * [Wasabi](cli-reference/storage/update/s3/wasabi.md) + * [Zata](cli-reference/storage/update/s3/zata.md) * [Seafile](cli-reference/storage/update/seafile.md) * [Sftp](cli-reference/storage/update/sftp.md) * [Sharefile](cli-reference/storage/update/sharefile.md) @@ -236,7 +265,6 @@ * [Sugarsync](cli-reference/storage/update/sugarsync.md) * [Swift](cli-reference/storage/update/swift.md) * 
[Union](cli-reference/storage/update/union.md) - * [Uptobox](cli-reference/storage/update/uptobox.md) * [Webdav](cli-reference/storage/update/webdav.md) * [Yandex](cli-reference/storage/update/yandex.md) * [Zoho](cli-reference/storage/update/zoho.md) diff --git a/docs/en/cli-reference/download.md b/docs/en/cli-reference/download.md index 5f049623d..129394475 100644 --- a/docs/en/cli-reference/download.md +++ b/docs/en/cli-reference/download.md @@ -22,9 +22,10 @@ OPTIONS: --netstorage-secret value Set the NetStorage account secret/G2O key for authentication. [$NETSTORAGE_SECRET] - Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, ChinaMobile, Cloudflare, DigitalOcean, Dreamhost, GCS, HuaweiOBS, IBMCOS, IDrive, IONOS, LyveCloud, Leviia, Liara, Linode, Magalu, Minio, Netease, Petabox, RackCorp, Rclone, Scaleway, SeaweedFS, StackPath, Storj, Synology, TencentCOS, Wasabi, Qiniu and others + Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, BizflyCloud, Ceph, ChinaMobile, Cloudflare, Cubbit, DigitalOcean, Dreamhost, Exaba, FileLu, FlashBlade, GCS, Hetzner, HuaweiOBS, IBMCOS, IDrive, Intercolo, IONOS, Leviia, Liara, Linode, LyveCloud, Magalu, Mega, Minio, Netease, Outscale, OVHcloud, Petabox, Qiniu, Rabata, RackCorp, Rclone, Scaleway, SeaweedFS, Selectel, Servercore, SpectraLogic, StackPath, Storj, Synology, TencentCOS, Wasabi, Zata, Other --s3-access-key-id value AWS Access Key ID. [$S3_ACCESS_KEY_ID] + --s3-ibm-api-key value IBM API Key to be used to obtain IAM token [$S3_IBM_API_KEY] --s3-secret-access-key value AWS Secret Access Key (password). [$S3_SECRET_ACCESS_KEY] --s3-session-token value An AWS session token. [$S3_SESSION_TOKEN] --s3-sse-customer-key value To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. [$S3_SSE_CUSTOMER_KEY] @@ -34,7 +35,10 @@ OPTIONS: Backblaze B2 - --b2-key value Application Key. [$B2_KEY] + --b2-key value Application Key. 
[$B2_KEY] + --b2-sse-customer-key value To use SSE-C, you may provide the secret encryption key encoded in a UTF-8 compatible string to encrypt/decrypt your data [$B2_SSE_CUSTOMER_KEY] + --b2-sse-customer-key-base64 value To use SSE-C, you may provide the secret encryption key encoded in Base64 format to encrypt/decrypt your data [$B2_SSE_CUSTOMER_KEY_BASE64] + --b2-sse-customer-key-md5 value If using SSE-C you may provide the secret encryption key MD5 checksum (optional). [$B2_SSE_CUSTOMER_KEY_MD5] Box @@ -90,6 +94,7 @@ OPTIONS: Google Cloud Storage (this is not Google Drive) + --gcs-access-token value Short-lived access token. [$GCS_ACCESS_TOKEN] --gcs-client-secret value OAuth Client Secret. [$GCS_CLIENT_SECRET] --gcs-token value OAuth Access Token as a JSON blob. [$GCS_TOKEN] --gcs-token-url value Token server url. [$GCS_TOKEN_URL] @@ -137,7 +142,8 @@ OPTIONS: Mega - --mega-pass value Password. [$MEGA_PASS] + --mega-master-key value Master key (internal use only) [$MEGA_MASTER_KEY] + --mega-pass value Password. [$MEGA_PASS] Microsoft Azure Blob Storage @@ -211,6 +217,7 @@ OPTIONS: --sftp-key-pem value Raw PEM-encoded private key. [$SFTP_KEY_PEM] --sftp-key-use-agent When set forces the usage of the ssh-agent. (default: false) [$SFTP_KEY_USE_AGENT] --sftp-pass value SSH password, leave blank to use ssh-agent. [$SFTP_PASS] + --sftp-pubkey value SSH public certificate for public certificate based authentication. [$SFTP_PUBKEY] --sftp-pubkey-file value Optional path to public key file. [$SFTP_PUBKEY_FILE] Sia Decentralized Cloud @@ -228,10 +235,6 @@ OPTIONS: --sugarsync-private-access-key value Sugarsync Private Access Key. [$SUGARSYNC_PRIVATE_ACCESS_KEY] --sugarsync-refresh-token value Sugarsync refresh token. [$SUGARSYNC_REFRESH_TOKEN] - Uptobox - - --uptobox-access-token value Your access token. [$UPTOBOX_ACCESS_TOKEN] - WebDAV --webdav-bearer-token value Bearer token instead of user/pass (e.g. a Macaroon). 
[$WEBDAV_BEARER_TOKEN] diff --git a/docs/en/cli-reference/run/content-provider.md b/docs/en/cli-reference/run/content-provider.md index 1a4ef8c62..4e013de3c 100644 --- a/docs/en/cli-reference/run/content-provider.md +++ b/docs/en/cli-reference/run/content-provider.md @@ -11,11 +11,9 @@ USAGE: OPTIONS: --help, -h show help - Bitswap Retrieval + HTTP IPFS Gateway - --enable-bitswap Enable bitswap retrieval (default: false) - --libp2p-identity-key value The base64 encoded private key for libp2p peer (default: AutoGenerated) - --libp2p-listen value [ --libp2p-listen value ] Addresses to listen on for libp2p connections + --enable-http-ipfs Enable trustless IPFS gateway on /ipfs/ (default: true) HTTP Piece Metadata Retrieval diff --git a/docs/en/cli-reference/run/download-server.md b/docs/en/cli-reference/run/download-server.md index c5772831d..f9de5d29b 100644 --- a/docs/en/cli-reference/run/download-server.md +++ b/docs/en/cli-reference/run/download-server.md @@ -25,9 +25,10 @@ OPTIONS: --netstorage-secret value Set the NetStorage account secret/G2O key for authentication. [$NETSTORAGE_SECRET] - Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, ChinaMobile, Cloudflare, DigitalOcean, Dreamhost, GCS, HuaweiOBS, IBMCOS, IDrive, IONOS, LyveCloud, Leviia, Liara, Linode, Magalu, Minio, Netease, Petabox, RackCorp, Rclone, Scaleway, SeaweedFS, StackPath, Storj, Synology, TencentCOS, Wasabi, Qiniu and others + Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, BizflyCloud, Ceph, ChinaMobile, Cloudflare, Cubbit, DigitalOcean, Dreamhost, Exaba, FileLu, FlashBlade, GCS, Hetzner, HuaweiOBS, IBMCOS, IDrive, Intercolo, IONOS, Leviia, Liara, Linode, LyveCloud, Magalu, Mega, Minio, Netease, Outscale, OVHcloud, Petabox, Qiniu, Rabata, RackCorp, Rclone, Scaleway, SeaweedFS, Selectel, Servercore, SpectraLogic, StackPath, Storj, Synology, TencentCOS, Wasabi, Zata, Other --s3-access-key-id value AWS Access Key ID. 
[$S3_ACCESS_KEY_ID] + --s3-ibm-api-key value IBM API Key to be used to obtain IAM token [$S3_IBM_API_KEY] --s3-secret-access-key value AWS Secret Access Key (password). [$S3_SECRET_ACCESS_KEY] --s3-session-token value An AWS session token. [$S3_SESSION_TOKEN] --s3-sse-customer-key value To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. [$S3_SSE_CUSTOMER_KEY] @@ -37,7 +38,10 @@ OPTIONS: Backblaze B2 - --b2-key value Application Key. [$B2_KEY] + --b2-key value Application Key. [$B2_KEY] + --b2-sse-customer-key value To use SSE-C, you may provide the secret encryption key encoded in a UTF-8 compatible string to encrypt/decrypt your data [$B2_SSE_CUSTOMER_KEY] + --b2-sse-customer-key-base64 value To use SSE-C, you may provide the secret encryption key encoded in Base64 format to encrypt/decrypt your data [$B2_SSE_CUSTOMER_KEY_BASE64] + --b2-sse-customer-key-md5 value If using SSE-C you may provide the secret encryption key MD5 checksum (optional). [$B2_SSE_CUSTOMER_KEY_MD5] Box @@ -91,6 +95,7 @@ OPTIONS: Google Cloud Storage (this is not Google Drive) + --gcs-access-token value Short-lived access token. [$GCS_ACCESS_TOKEN] --gcs-client-secret value OAuth Client Secret. [$GCS_CLIENT_SECRET] --gcs-token value OAuth Access Token as a JSON blob. [$GCS_TOKEN] --gcs-token-url value Token server url. [$GCS_TOKEN_URL] @@ -138,7 +143,8 @@ OPTIONS: Mega - --mega-pass value Password. [$MEGA_PASS] + --mega-master-key value Master key (internal use only) [$MEGA_MASTER_KEY] + --mega-pass value Password. [$MEGA_PASS] Microsoft Azure Blob Storage @@ -212,6 +218,7 @@ OPTIONS: --sftp-key-pem value Raw PEM-encoded private key. [$SFTP_KEY_PEM] --sftp-key-use-agent When set forces the usage of the ssh-agent. (default: false) [$SFTP_KEY_USE_AGENT] --sftp-pass value SSH password, leave blank to use ssh-agent. [$SFTP_PASS] + --sftp-pubkey value SSH public certificate for public certificate based authentication. 
[$SFTP_PUBKEY] --sftp-pubkey-file value Optional path to public key file. [$SFTP_PUBKEY_FILE] Sia Decentralized Cloud @@ -229,10 +236,6 @@ OPTIONS: --sugarsync-private-access-key value Sugarsync Private Access Key. [$SUGARSYNC_PRIVATE_ACCESS_KEY] --sugarsync-refresh-token value Sugarsync refresh token. [$SUGARSYNC_REFRESH_TOKEN] - Uptobox - - --uptobox-access-token value Your access token. [$UPTOBOX_ACCESS_TOKEN] - WebDAV --webdav-bearer-token value Bearer token instead of user/pass (e.g. a Macaroon). [$WEBDAV_BEARER_TOKEN] diff --git a/docs/en/cli-reference/storage/create/README.md b/docs/en/cli-reference/storage/create/README.md index 2d6bbca5e..6e16465f6 100644 --- a/docs/en/cli-reference/storage/create/README.md +++ b/docs/en/cli-reference/storage/create/README.md @@ -36,7 +36,7 @@ COMMANDS: premiumizeme premiumize.me putio Put.io qingstor QingCloud Object Storage - s3 Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, ChinaMobile, Cloudflare, DigitalOcean, Dreamhost, GCS, HuaweiOBS, IBMCOS, IDrive, IONOS, LyveCloud, Leviia, Liara, Linode, Magalu, Minio, Netease, Petabox, RackCorp, Rclone, Scaleway, SeaweedFS, StackPath, Storj, Synology, TencentCOS, Wasabi, Qiniu and others + s3 Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, BizflyCloud, Ceph, ChinaMobile, Cloudflare, Cubbit, DigitalOcean, Dreamhost, Exaba, FileLu, FlashBlade, GCS, Hetzner, HuaweiOBS, IBMCOS, IDrive, Intercolo, IONOS, Leviia, Liara, Linode, LyveCloud, Magalu, Mega, Minio, Netease, Outscale, OVHcloud, Petabox, Qiniu, Rabata, RackCorp, Rclone, Scaleway, SeaweedFS, Selectel, Servercore, SpectraLogic, StackPath, Storj, Synology, TencentCOS, Wasabi, Zata, Other seafile seafile sftp SSH/SFTP sharefile Citrix Sharefile @@ -46,7 +46,6 @@ COMMANDS: sugarsync Sugarsync swift OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH) union Union merges the contents of several upstream fs - uptobox Uptobox webdav WebDAV yandex 
Yandex Disk zoho Zoho diff --git a/docs/en/cli-reference/storage/create/azureblob.md b/docs/en/cli-reference/storage/create/azureblob.md index 36b66e558..7f31ca09f 100644 --- a/docs/en/cli-reference/storage/create/azureblob.md +++ b/docs/en/cli-reference/storage/create/azureblob.md @@ -35,6 +35,12 @@ DESCRIPTION: Leave blank if using account/key or Emulator. + --connection-string + Storage Connection String. + + Connection string for the storage. Leave blank if using other auth methods. + + --tenant ID of the service principal's tenant. Also called its directory ID. @@ -118,6 +124,20 @@ DESCRIPTION: keys instead of setting `service_principal_file`. + --disable-instance-discovery + Skip requesting Microsoft Entra instance metadata + + This should be set true only by applications authenticating in + disconnected clouds, or private clouds such as Azure Stack. + + It determines whether rclone requests Microsoft Entra instance + metadata from `https://login.microsoft.com/` before + authenticating. + + Setting this to true will skip this request, making you responsible + for ensuring the configured authority is valid and trustworthy. + + --use-msi Use a managed service identity to authenticate (only works in Azure). @@ -150,6 +170,18 @@ DESCRIPTION: Leave blank if using real azure storage endpoint. + --use-az + Use Azure CLI tool az for authentication + + Set to use the [Azure CLI tool az](https://learn.microsoft.com/en-us/cli/azure/) + as the sole means of authentication. + + Setting this can be useful if you wish to use the az CLI on a host with + a System Managed Identity that you do not want to use. + + Don't set env_auth at the same time. + + --endpoint Endpoint for the service. @@ -183,6 +215,41 @@ DESCRIPTION: "--transfers" * "--azureblob-upload-concurrency" chunks stored at once in memory. + --copy-cutoff + Cutoff for switching to multipart copy. 
+ + Any files larger than this that need to be server-side copied will be + copied in chunks of chunk_size using the put block list API. + + Files smaller than this limit will be copied with the Copy Blob API. + + --copy-concurrency + Concurrency for multipart copy. + + This is the number of chunks of the same file that are copied + concurrently. + + These chunks are not buffered in memory and Microsoft recommends + setting this value to greater than 1000 in the azcopy documentation. + + https://learn.microsoft.com/en-us/azure/storage/common/storage-use-azcopy-optimize#increase-concurrency + + In tests, copy speed increases almost linearly with copy + concurrency. + + --use-copy-blob + Whether to use the Copy Blob API when copying to the same storage account. + + If true (the default) then rclone will use the Copy Blob API for + copies to the same storage account even when the size is above the + copy_cutoff. + + Rclone assumes that the same storage account means the same config + and does not check for the same storage account in different configs. + + There should be no need to change this value. + + --list-chunk Size of blob list. @@ -290,6 +357,7 @@ OPTIONS: --client-certificate-path value Path to a PEM or PKCS12 certificate file including the private key. [$CLIENT_CERTIFICATE_PATH] --client-id value The ID of the client in use. [$CLIENT_ID] --client-secret value One of the service principal's client secrets [$CLIENT_SECRET] + --connection-string value Storage Connection String. [$CONNECTION_STRING] --env-auth Read credentials from runtime (environment variables, CLI or MSI). (default: false) [$ENV_AUTH] --help, -h show help --key value Storage Account Shared Key. [$KEY] @@ -302,10 +370,13 @@ OPTIONS: --archive-tier-delete Delete archive tier blobs before overwriting. (default: false) [$ARCHIVE_TIER_DELETE] --chunk-size value Upload chunk size. (default: "4Mi") [$CHUNK_SIZE] --client-send-certificate-chain Send the certificate chain when using certificate auth. 
(default: false) [$CLIENT_SEND_CERTIFICATE_CHAIN] + --copy-concurrency value Concurrency for multipart copy. (default: 512) [$COPY_CONCURRENCY] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "8Mi") [$COPY_CUTOFF] --delete-snapshots value Set to specify how to deal with snapshots on blob deletion. [$DELETE_SNAPSHOTS] --description value Description of the remote. [$DESCRIPTION] --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-instance-discovery Skip requesting Microsoft Entra instance metadata (default: false) [$DISABLE_INSTANCE_DISCOVERY] --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8") [$ENCODING] --endpoint value Endpoint for the service. [$ENDPOINT] --list-chunk value Size of blob list. (default: 5000) [$LIST_CHUNK] @@ -321,6 +392,8 @@ OPTIONS: --service-principal-file value Path to file containing credentials for use with a service principal. [$SERVICE_PRINCIPAL_FILE] --upload-concurrency value Concurrency for multipart uploads. (default: 16) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload (<= 256 MiB) (deprecated). [$UPLOAD_CUTOFF] + --use-az Use Azure CLI tool az for authentication (default: false) [$USE_AZ] + --use-copy-blob Whether to use the Copy Blob API when copying to the same storage account. (default: true) [$USE_COPY_BLOB] --use-emulator Uses local storage emulator if provided as 'true'. (default: false) [$USE_EMULATOR] --use-msi Use a managed service identity to authenticate (only works in Azure). 
(default: false) [$USE_MSI] --username value User name (usually an email address) [$USERNAME] diff --git a/docs/en/cli-reference/storage/create/b2.md b/docs/en/cli-reference/storage/create/b2.md index 4adb9378e..7a8f2610f 100644 --- a/docs/en/cli-reference/storage/create/b2.md +++ b/docs/en/cli-reference/storage/create/b2.md @@ -152,6 +152,38 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --sse-customer-algorithm + If using SSE-C, the server-side encryption algorithm used when storing this object in B2. + + Examples: + | | None + | AES256 | Advanced Encryption Standard (256 bits key length) + + --sse-customer-key + To use SSE-C, you may provide the secret encryption key encoded in a UTF-8 compatible string to encrypt/decrypt your data + + Alternatively you can provide --sse-customer-key-base64. + + Examples: + | | None + + --sse-customer-key-base64 + To use SSE-C, you may provide the secret encryption key encoded in Base64 format to encrypt/decrypt your data + + Alternatively you can provide --sse-customer-key. + + Examples: + | | None + + --sse-customer-key-md5 + If using SSE-C you may provide the secret encryption key MD5 checksum (optional). + + If you leave it blank, this is calculated automatically from the sse_customer_key provided. + + + Examples: + | | None + --description Description of the remote. @@ -164,22 +196,26 @@ OPTIONS: Advanced - --chunk-size value Upload chunk size. (default: "96Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4Gi") [$COPY_CUTOFF] - --description value Description of the remote. [$DESCRIPTION] - --disable-checksum Disable checksums for large (> upload cutoff) files. (default: false) [$DISABLE_CHECKSUM] - --download-auth-duration value Time before the public link authorization token will expire in s or suffix ms|s|m|h|d. (default: "1w") [$DOWNLOAD_AUTH_DURATION] - --download-url value Custom endpoint for downloads. 
[$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] - --endpoint value Endpoint for the service. [$ENDPOINT] - --lifecycle value Set the number of days deleted files should be kept when creating a bucket. (default: 0) [$LIFECYCLE] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] - --test-mode value A flag string for X-Bz-Test-Mode header for debugging. [$TEST_MODE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --chunk-size value Upload chunk size. (default: "96Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4Gi") [$COPY_CUTOFF] + --description value Description of the remote. [$DESCRIPTION] + --disable-checksum Disable checksums for large (> upload cutoff) files. (default: false) [$DISABLE_CHECKSUM] + --download-auth-duration value Time before the public link authorization token will expire in s or suffix ms|s|m|h|d. (default: "1w") [$DOWNLOAD_AUTH_DURATION] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] + --endpoint value Endpoint for the service. [$ENDPOINT] + --lifecycle value Set the number of days deleted files should be kept when creating a bucket. 
(default: 0) [$LIFECYCLE] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --sse-customer-algorithm value If using SSE-C, the server-side encryption algorithm used when storing this object in B2. [$SSE_CUSTOMER_ALGORITHM] + --sse-customer-key value To use SSE-C, you may provide the secret encryption key encoded in a UTF-8 compatible string to encrypt/decrypt your data [$SSE_CUSTOMER_KEY] + --sse-customer-key-base64 value To use SSE-C, you may provide the secret encryption key encoded in Base64 format to encrypt/decrypt your data [$SSE_CUSTOMER_KEY_BASE64] + --sse-customer-key-md5 value If using SSE-C you may provide the secret encryption key MD5 checksum (optional). [$SSE_CUSTOMER_KEY_MD5] + --test-mode value A flag string for X-Bz-Test-Mode header for debugging. [$TEST_MODE] + --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config diff --git a/docs/en/cli-reference/storage/create/box.md b/docs/en/cli-reference/storage/create/box.md index e0bdd12ae..3ef7c7d6f 100644 --- a/docs/en/cli-reference/storage/create/box.md +++ b/docs/en/cli-reference/storage/create/box.md @@ -32,6 +32,13 @@ DESCRIPTION: Leave blank to use the provider defaults. + --client-credentials + Use client credentials OAuth flow. + + This will use the OAUTH2 client Credentials Flow as described in RFC 6749. + + Note that this option is NOT supported by all backends. 
+ --root-folder-id Fill in for rclone to use a non root folder as its starting point. @@ -42,6 +49,11 @@ DESCRIPTION: Leading `~` will be expanded in the file name as will environment variables such as `${RCLONE_CONFIG_DIR}`. + --config-credentials + Box App config.json contents. + + Leave blank normally. + --access-token Box App Primary Access Token @@ -90,16 +102,18 @@ DESCRIPTION: OPTIONS: - --access-token value Box App Primary Access Token [$ACCESS_TOKEN] - --box-config-file value Box App config.json location [$BOX_CONFIG_FILE] - --box-sub-type value (default: "user") [$BOX_SUB_TYPE] - --client-id value OAuth Client Id. [$CLIENT_ID] - --client-secret value OAuth Client Secret. [$CLIENT_SECRET] - --help, -h show help + --access-token value Box App Primary Access Token [$ACCESS_TOKEN] + --box-config-file value Box App config.json location [$BOX_CONFIG_FILE] + --box-sub-type value (default: "user") [$BOX_SUB_TYPE] + --client-id value OAuth Client Id. [$CLIENT_ID] + --client-secret value OAuth Client Secret. [$CLIENT_SECRET] + --config-credentials value Box App config.json contents. [$CONFIG_CREDENTIALS] + --help, -h show help Advanced --auth-url value Auth server URL. [$AUTH_URL] + --client-credentials Use client credentials OAuth flow. (default: false) [$CLIENT_CREDENTIALS] --commit-retries value Max number of times to try committing a multipart file. (default: 100) [$COMMIT_RETRIES] --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot") [$ENCODING] diff --git a/docs/en/cli-reference/storage/create/drive.md b/docs/en/cli-reference/storage/create/drive.md index 820f5790a..317b2aa28 100644 --- a/docs/en/cli-reference/storage/create/drive.md +++ b/docs/en/cli-reference/storage/create/drive.md @@ -33,6 +33,13 @@ DESCRIPTION: Leave blank to use the provider defaults. + --client-credentials + Use client credentials OAuth flow. 
+ + This will use the OAUTH2 client Credentials Flow as described in RFC 6749. + + Note that this option is NOT supported by all backends. + --scope Comma separated list of scopes that rclone should use when requesting access from drive. @@ -415,6 +422,15 @@ DESCRIPTION: | failok | If writing fails log errors only, don't fail the transfer | read,write | Read and Write the value. + --metadata-enforce-expansive-access + Whether the request should enforce expansive access rules. + + From Feb 2026 this flag will be set by default so this flag can be used for + testing before then. + + See: https://developers.google.com/workspace/drive/api/guides/limited-expansive-access + + --encoding The encoding for the backend. @@ -448,6 +464,7 @@ OPTIONS: --auth-owner-only Only consider files owned by the authenticated user. (default: false) [$AUTH_OWNER_ONLY] --auth-url value Auth server URL. [$AUTH_URL] --chunk-size value Upload chunk size. (default: "8Mi") [$CHUNK_SIZE] + --client-credentials Use client credentials OAuth flow. (default: false) [$CLIENT_CREDENTIALS] --copy-shortcut-content Server side copy contents of shortcuts instead of the shortcut. (default: false) [$COPY_SHORTCUT_CONTENT] --description value Description of the remote. [$DESCRIPTION] --disable-http2 Disable drive using http2. (default: true) [$DISABLE_HTTP2] @@ -460,6 +477,7 @@ OPTIONS: --import-formats value Comma separated list of preferred formats for uploading Google docs. [$IMPORT_FORMATS] --keep-revision-forever Keep new head revision of each file forever. (default: false) [$KEEP_REVISION_FOREVER] --list-chunk value Size of listing chunk 100-1000, 0 to disable. (default: 1000) [$LIST_CHUNK] + --metadata-enforce-expansive-access Whether the request should enforce expansive access rules. (default: false) [$METADATA_ENFORCE_EXPANSIVE_ACCESS] --metadata-labels value Control whether labels should be read or written in metadata. 
(default: "off") [$METADATA_LABELS] --metadata-owner value Control whether owner should be read or written in metadata. (default: "read") [$METADATA_OWNER] --metadata-permissions value Control whether permissions should be read or written in metadata. (default: "off") [$METADATA_PERMISSIONS] diff --git a/docs/en/cli-reference/storage/create/dropbox.md b/docs/en/cli-reference/storage/create/dropbox.md index c5f903d3b..5dc5d6606 100644 --- a/docs/en/cli-reference/storage/create/dropbox.md +++ b/docs/en/cli-reference/storage/create/dropbox.md @@ -32,6 +32,13 @@ DESCRIPTION: Leave blank to use the provider defaults. + --client-credentials + Use client credentials OAuth flow. + + This will use the OAUTH2 client Credentials Flow as described in RFC 6749. + + Note that this option is NOT supported by all backends. + --chunk-size Upload chunk size (< 150Mi). @@ -95,6 +102,32 @@ DESCRIPTION: --root-namespace Specify a different Dropbox namespace ID to use as the root for all paths. + --export-formats + Comma separated list of preferred formats for exporting files + + Certain Dropbox files can only be accessed by exporting them to another format. + These include Dropbox Paper documents. + + For each such file, rclone will choose the first format on this list that Dropbox + considers valid. If none is valid, it will choose Dropbox's default format. + + Known formats include: "html", "md" (markdown) + + --skip-exports + Skip exportable files in all listings. + + If given, exportable files practically become invisible to rclone. + + --show-all-exports + Show all exportable files in listings. + + Adding this flag will allow all exportable files to be server side copied. + Note that rclone doesn't add extensions to the exportable file names in this mode. + + Do **not** use this flag when trying to download exportable files - rclone + will fail to download them. + + --batch-mode Upload file batching sync|async|off. 
@@ -147,7 +180,7 @@ DESCRIPTION: --batch-commit-timeout - Max time to wait for a batch to finish committing + Max time to wait for a batch to finish committing. (no longer used) --description Description of the remote. @@ -161,18 +194,22 @@ OPTIONS: Advanced --auth-url value Auth server URL. [$AUTH_URL] - --batch-commit-timeout value Max time to wait for a batch to finish committing (default: "10m0s") [$BATCH_COMMIT_TIMEOUT] + --batch-commit-timeout value Max time to wait for a batch to finish committing. (no longer used) (default: "10m0s") [$BATCH_COMMIT_TIMEOUT] --batch-mode value Upload file batching sync|async|off. (default: "sync") [$BATCH_MODE] --batch-size value Max number of files in upload batch. (default: 0) [$BATCH_SIZE] --batch-timeout value Max time to allow an idle upload batch before uploading. (default: "0s") [$BATCH_TIMEOUT] --chunk-size value Upload chunk size (< 150Mi). (default: "48Mi") [$CHUNK_SIZE] + --client-credentials Use client credentials OAuth flow. (default: false) [$CLIENT_CREDENTIALS] --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot") [$ENCODING] + --export-formats value Comma separated list of preferred formats for exporting files (default: "html,md") [$EXPORT_FORMATS] --impersonate value Impersonate this user when using a business account. [$IMPERSONATE] --pacer-min-sleep value Minimum time to sleep between API calls. (default: "10ms") [$PACER_MIN_SLEEP] --root-namespace value Specify a different Dropbox namespace ID to use as the root for all paths. [$ROOT_NAMESPACE] --shared-files Instructs rclone to work on individual shared files. (default: false) [$SHARED_FILES] --shared-folders Instructs rclone to work on shared folders. (default: false) [$SHARED_FOLDERS] + --show-all-exports Show all exportable files in listings. (default: false) [$SHOW_ALL_EXPORTS] + --skip-exports Skip exportable files in all listings. 
(default: false) [$SKIP_EXPORTS] --token value OAuth Access Token as a JSON blob. [$TOKEN] --token-url value Token server url. [$TOKEN_URL] diff --git a/docs/en/cli-reference/storage/create/ftp.md b/docs/en/cli-reference/storage/create/ftp.md index f3abb06d6..73f7bd0f6 100644 --- a/docs/en/cli-reference/storage/create/ftp.md +++ b/docs/en/cli-reference/storage/create/ftp.md @@ -95,6 +95,14 @@ DESCRIPTION: --disable-tls13 Disable TLS 1.3 (workaround for FTP servers with buggy TLS) + --allow-insecure-tls-ciphers + Allow insecure TLS ciphers + + Setting this flag will allow the usage of the following TLS ciphers in addition to the secure defaults: + + - TLS_RSA_WITH_AES_128_GCM_SHA256 + + --shut-timeout Maximum time to wait for data connection closing status. @@ -107,12 +115,38 @@ DESCRIPTION: --socks-proxy Socks 5 proxy host. - Supports the format user:pass@host:port, user@host:port, host:port. - - Example: + Supports the format user:pass@host:port, user@host:port, host:port. - myUser:myPass@localhost:9005 + Example: + myUser:myPass@localhost:9005 + + + --http-proxy + URL for HTTP CONNECT proxy + + Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb. + + Supports the format http://user:pass@host:port, http://host:port, http://host. + + Example: + + http://myUser:myPass@proxyhostname.example.com:8000 + + + --no-check-upload + Don't check the upload is OK + + Normally rclone will try to check the upload exists after it has + uploaded a file to make sure the size and modification time are as + expected. + + This flag stops rclone doing these checks. This enables uploading to + folders which are write only. + + You will likely need to use the --inplace flag also if uploading to + a write only folder. + --encoding The encoding for the backend. @@ -139,22 +173,25 @@ OPTIONS: Advanced - --ask-password Allow asking for FTP password when needed. (default: false) [$ASK_PASSWORD] - --close-timeout value Maximum time to wait for a response to close. 
(default: "1m0s") [$CLOSE_TIMEOUT] - --concurrency value Maximum number of FTP simultaneous connections, 0 for unlimited. (default: 0) [$CONCURRENCY] - --description value Description of the remote. [$DESCRIPTION] - --disable-epsv Disable using EPSV even if server advertises support. (default: false) [$DISABLE_EPSV] - --disable-mlsd Disable using MLSD even if server advertises support. (default: false) [$DISABLE_MLSD] - --disable-tls13 Disable TLS 1.3 (workaround for FTP servers with buggy TLS) (default: false) [$DISABLE_TLS13] - --disable-utf8 Disable using UTF-8 even if server advertises support. (default: false) [$DISABLE_UTF8] - --encoding value The encoding for the backend. (default: "Slash,Del,Ctl,RightSpace,Dot") [$ENCODING] - --force-list-hidden Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD. (default: false) [$FORCE_LIST_HIDDEN] - --idle-timeout value Max time before closing idle connections. (default: "1m0s") [$IDLE_TIMEOUT] - --no-check-certificate Do not verify the TLS certificate of the server. (default: false) [$NO_CHECK_CERTIFICATE] - --shut-timeout value Maximum time to wait for data connection closing status. (default: "1m0s") [$SHUT_TIMEOUT] - --socks-proxy value Socks 5 proxy host. [$SOCKS_PROXY] - --tls-cache-size value Size of TLS session cache for all control and data connections. (default: 32) [$TLS_CACHE_SIZE] - --writing-mdtm Use MDTM to set modification time (VsFtpd quirk) (default: false) [$WRITING_MDTM] + --allow-insecure-tls-ciphers Allow insecure TLS ciphers (default: false) [$ALLOW_INSECURE_TLS_CIPHERS] + --ask-password Allow asking for FTP password when needed. (default: false) [$ASK_PASSWORD] + --close-timeout value Maximum time to wait for a response to close. (default: "1m0s") [$CLOSE_TIMEOUT] + --concurrency value Maximum number of FTP simultaneous connections, 0 for unlimited. (default: 0) [$CONCURRENCY] + --description value Description of the remote. 
[$DESCRIPTION] + --disable-epsv Disable using EPSV even if server advertises support. (default: false) [$DISABLE_EPSV] + --disable-mlsd Disable using MLSD even if server advertises support. (default: false) [$DISABLE_MLSD] + --disable-tls13 Disable TLS 1.3 (workaround for FTP servers with buggy TLS) (default: false) [$DISABLE_TLS13] + --disable-utf8 Disable using UTF-8 even if server advertises support. (default: false) [$DISABLE_UTF8] + --encoding value The encoding for the backend. (default: "Slash,Del,Ctl,RightSpace,Dot") [$ENCODING] + --force-list-hidden Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD. (default: false) [$FORCE_LIST_HIDDEN] + --http-proxy value URL for HTTP CONNECT proxy [$HTTP_PROXY] + --idle-timeout value Max time before closing idle connections. (default: "1m0s") [$IDLE_TIMEOUT] + --no-check-certificate Do not verify the TLS certificate of the server. (default: false) [$NO_CHECK_CERTIFICATE] + --no-check-upload Don't check the upload is OK (default: false) [$NO_CHECK_UPLOAD] + --shut-timeout value Maximum time to wait for data connection closing status. (default: "1m0s") [$SHUT_TIMEOUT] + --socks-proxy value Socks 5 proxy host. [$SOCKS_PROXY] + --tls-cache-size value Size of TLS session cache for all control and data connections. (default: 32) [$TLS_CACHE_SIZE] + --writing-mdtm Use MDTM to set modification time (VsFtpd quirk) (default: false) [$WRITING_MDTM] Client Config diff --git a/docs/en/cli-reference/storage/create/gcs.md b/docs/en/cli-reference/storage/create/gcs.md index d2980af30..a4bb77d85 100644 --- a/docs/en/cli-reference/storage/create/gcs.md +++ b/docs/en/cli-reference/storage/create/gcs.md @@ -32,6 +32,13 @@ DESCRIPTION: Leave blank to use the provider defaults. + --client-credentials + Use client credentials OAuth flow. + + This will use the OAUTH2 client Credentials Flow as described in RFC 6749. + + Note that this option is NOT supported by all backends. 
+ --project-number Project number. @@ -56,6 +63,12 @@ DESCRIPTION: Leave blank normally. Needed only if you want use SA instead of interactive login. + --access-token + Short-lived access token. + + Leave blank normally. + Needed only if you want use short-lived access token instead of interactive login. + --anonymous Access public buckets and objects without credentials. @@ -136,6 +149,7 @@ DESCRIPTION: | us-central1 | Iowa | us-east1 | South Carolina | us-east4 | Northern Virginia + | us-east5 | Ohio | us-west1 | Oregon | us-west2 | California | us-west3 | Salt Lake City @@ -186,9 +200,19 @@ DESCRIPTION: --endpoint - Endpoint for the service. + Custom endpoint for the storage API. Leave blank to use the provider default. - Leave blank normally. + When using a custom endpoint that includes a subpath (e.g. example.org/custom/endpoint), + the subpath will be ignored during upload operations due to a limitation in the + underlying Google API Go client library. + Download and listing operations will work correctly with the full endpoint path. + If you require subpath support for uploads, avoid using subpaths in your custom + endpoint configuration. + + Examples: + | storage.example.org | Specify a custom endpoint + | storage.example.org:4443 | Specifying a custom endpoint with port + | storage.example.org:4443/gcs/api | Specifying a subpath, see the note, uploads won't use the custom path! --encoding The encoding for the backend. @@ -226,15 +250,17 @@ OPTIONS: Advanced - --auth-url value Auth server URL. [$AUTH_URL] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --description value Description of the remote. [$DESCRIPTION] - --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] - --encoding value The encoding for the backend. (default: "Slash,CrLf,InvalidUtf8,Dot") [$ENCODING] - --endpoint value Endpoint for the service. 
[$ENDPOINT] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --token value OAuth Access Token as a JSON blob. [$TOKEN] - --token-url value Token server url. [$TOKEN_URL] + --access-token value Short-lived access token. [$ACCESS_TOKEN] + --auth-url value Auth server URL. [$AUTH_URL] + --client-credentials Use client credentials OAuth flow. (default: false) [$CLIENT_CREDENTIALS] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --encoding value The encoding for the backend. (default: "Slash,CrLf,InvalidUtf8,Dot") [$ENCODING] + --endpoint value Custom endpoint for the storage API. Leave blank to use the provider default. [$ENDPOINT] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --token value OAuth Access Token as a JSON blob. [$TOKEN] + --token-url value Token server url. [$TOKEN_URL] Client Config diff --git a/docs/en/cli-reference/storage/create/gphotos.md b/docs/en/cli-reference/storage/create/gphotos.md index 90be0f18e..655fab834 100644 --- a/docs/en/cli-reference/storage/create/gphotos.md +++ b/docs/en/cli-reference/storage/create/gphotos.md @@ -32,6 +32,13 @@ DESCRIPTION: Leave blank to use the provider defaults. + --client-credentials + Use client credentials OAuth flow. + + This will use the OAUTH2 client Credentials Flow as described in RFC 6749. + + Note that this option is NOT supported by all backends. + --read-only Set to make the Google Photos backend read only. @@ -65,6 +72,32 @@ DESCRIPTION: Without this flag, archived media will not be visible in directory listings and won't be transferred. 
+ --proxy + Use the gphotosdl proxy for downloading the full resolution images + + The Google API will deliver images and video which aren't full + resolution, and/or have EXIF data missing. + + However if you use the gphotosdl proxy then you can download original, + unchanged images. + + This runs a headless browser in the background. + + Download the software from [gphotosdl](https://github.com/rclone/gphotosdl) + + First run with + + gphotosdl -login + + Then once you have logged into google photos close the browser window + and run + + gphotosdl + + Then supply the parameter `--gphotos-proxy "http://localhost:8282"` to make + rclone use the proxy. + + --encoding The encoding for the backend. @@ -120,7 +153,7 @@ DESCRIPTION: --batch-commit-timeout - Max time to wait for a batch to finish committing + Max time to wait for a batch to finish committing. (no longer used) --description Description of the remote. @@ -135,13 +168,15 @@ OPTIONS: Advanced --auth-url value Auth server URL. [$AUTH_URL] - --batch-commit-timeout value Max time to wait for a batch to finish committing (default: "10m0s") [$BATCH_COMMIT_TIMEOUT] + --batch-commit-timeout value Max time to wait for a batch to finish committing. (no longer used) (default: "10m0s") [$BATCH_COMMIT_TIMEOUT] --batch-mode value Upload file batching sync|async|off. (default: "sync") [$BATCH_MODE] --batch-size value Max number of files in upload batch. (default: 0) [$BATCH_SIZE] --batch-timeout value Max time to allow an idle upload batch before uploading. (default: "0s") [$BATCH_TIMEOUT] + --client-credentials Use client credentials OAuth flow. (default: false) [$CLIENT_CREDENTIALS] --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,CrLf,InvalidUtf8,Dot") [$ENCODING] --include-archived Also view and download archived media. 
(default: false) [$INCLUDE_ARCHIVED] + --proxy value Use the gphotosdl proxy for downloading the full resolution images [$PROXY] --read-size Set to read the size of media items. (default: false) [$READ_SIZE] --start-year value Year limits the photos to be downloaded to those which are uploaded after the given year. (default: 2000) [$START_YEAR] --token value OAuth Access Token as a JSON blob. [$TOKEN] diff --git a/docs/en/cli-reference/storage/create/hidrive.md b/docs/en/cli-reference/storage/create/hidrive.md index 1068f63fc..0a090781f 100644 --- a/docs/en/cli-reference/storage/create/hidrive.md +++ b/docs/en/cli-reference/storage/create/hidrive.md @@ -32,6 +32,13 @@ DESCRIPTION: Leave blank to use the provider defaults. + --client-credentials + Use client credentials OAuth flow. + + This will use the OAUTH2 client Credentials Flow as described in RFC 6749. + + Note that this option is NOT supported by all backends. + --scope-access Access permissions that rclone should use when requesting access from HiDrive. @@ -121,6 +128,7 @@ OPTIONS: --auth-url value Auth server URL. [$AUTH_URL] --chunk-size value Chunksize for chunked uploads. (default: "48Mi") [$CHUNK_SIZE] + --client-credentials Use client credentials OAuth flow. (default: false) [$CLIENT_CREDENTIALS] --description value Description of the remote. [$DESCRIPTION] --disable-fetching-member-count Do not fetch number of objects in directories unless it is absolutely necessary. (default: false) [$DISABLE_FETCHING_MEMBER_COUNT] --encoding value The encoding for the backend. (default: "Slash,Dot") [$ENCODING] diff --git a/docs/en/cli-reference/storage/create/internetarchive.md b/docs/en/cli-reference/storage/create/internetarchive.md index f226f4f74..237cc6cb4 100644 --- a/docs/en/cli-reference/storage/create/internetarchive.md +++ b/docs/en/cli-reference/storage/create/internetarchive.md @@ -30,6 +30,15 @@ DESCRIPTION: Leave blank for default value. 
+ --item-metadata + Metadata to be set on the IA item, this is different from file-level metadata that can be set using --metadata-set. + Format is key=value and the 'x-archive-meta-' prefix is automatically added. + + --item-derive + Whether to trigger derive on the IA item or not. If set to false, the item will not be derived by IA upon upload. + The derive process produces a number of secondary files from an upload to make an upload more usable on the web. + Setting this to false is useful for uploading files that are already in a format that IA can display or reduce burden on IA's infrastructure. + --disable-checksum Don't ask the server to test against MD5 checksum calculated by rclone. Normally rclone will calculate the MD5 checksum of the input before @@ -54,6 +63,7 @@ DESCRIPTION: OPTIONS: --access-key-id value IAS3 Access Key. [$ACCESS_KEY_ID] --help, -h show help + --item-derive Whether to trigger derive on the IA item or not. If set to false, the item will not be derived by IA upon upload. (default: true) [$ITEM_DERIVE] --secret-access-key value IAS3 Secret Key (password). [$SECRET_ACCESS_KEY] Advanced @@ -63,6 +73,7 @@ OPTIONS: --encoding value The encoding for the backend. (default: "Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] --endpoint value IAS3 Endpoint. (default: "https://s3.us.archive.org") [$ENDPOINT] --front-endpoint value Host of InternetArchive Frontend. (default: "https://archive.org") [$FRONT_ENDPOINT] + --item-metadata value Metadata to be set on the IA item, this is different from file-level metadata that can be set using --metadata-set. [$ITEM_METADATA] --wait-archive value Timeout for waiting the server's processing tasks (specifically archive and book_op) to finish. 
(default: "0s") [$WAIT_ARCHIVE] Client Config diff --git a/docs/en/cli-reference/storage/create/jottacloud.md b/docs/en/cli-reference/storage/create/jottacloud.md index fedd18369..d96bcaa16 100644 --- a/docs/en/cli-reference/storage/create/jottacloud.md +++ b/docs/en/cli-reference/storage/create/jottacloud.md @@ -32,6 +32,13 @@ DESCRIPTION: Leave blank to use the provider defaults. + --client-credentials + Use client credentials OAuth flow. + + This will use the OAUTH2 client Credentials Flow as described in RFC 6749. + + Note that this option is NOT supported by all backends. + --md5-memory-limit Files bigger than this will be cached on disk to calculate the MD5 if required. @@ -66,6 +73,7 @@ OPTIONS: Advanced --auth-url value Auth server URL. [$AUTH_URL] + --client-credentials Use client credentials OAuth flow. (default: false) [$CLIENT_CREDENTIALS] --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] --hard-delete Delete files permanently rather than putting them into the trash. (default: false) [$HARD_DELETE] diff --git a/docs/en/cli-reference/storage/create/local.md b/docs/en/cli-reference/storage/create/local.md index 9b4decdf2..afb49f117 100644 --- a/docs/en/cli-reference/storage/create/local.md +++ b/docs/en/cli-reference/storage/create/local.md @@ -19,7 +19,7 @@ DESCRIPTION: Follow symlinks and copy the pointed to item. --links - Translate symlinks to/from regular files with a '.rclonelink' extension. + Translate symlinks to/from regular files with a '.rclonelink' extension for the local backend. --skip-links Don't warn about skipped symlinks. @@ -27,6 +27,13 @@ DESCRIPTION: This flag disables warning messages on skipped symlinks or junction points, as you explicitly acknowledge that they should be skipped. + --skip-specials + Don't warn about skipped pipes, sockets and device objects. 
+ + This flag disables warning messages on skipped pipes, sockets and + device objects, as you explicitly acknowledge that they should be + skipped. + --zero-size-links Assume the Stat size of links is zero (and read them instead) (deprecated). @@ -176,6 +183,9 @@ DESCRIPTION: | btime | The creation time. | ctime | The last status change time. + --hashes + Comma separated list of supported checksum types. + --encoding The encoding for the backend. @@ -195,7 +205,8 @@ OPTIONS: --copy-links, -L Follow symlinks and copy the pointed to item. (default: false) [$COPY_LINKS] --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,Dot") [$ENCODING] - --links, -l Translate symlinks to/from regular files with a '.rclonelink' extension. (default: false) [$LINKS] + --hashes value Comma separated list of supported checksum types. [$HASHES] + --links Translate symlinks to/from regular files with a '.rclonelink' extension for the local backend. (default: false) [$LINKS] --no-check-updated Don't check to see if the files change during upload. (default: false) [$NO_CHECK_UPDATED] --no-clone Disable reflink cloning for server-side copies. (default: false) [$NO_CLONE] --no-preallocate Disable preallocation of disk space for transferred files. (default: false) [$NO_PREALLOCATE] @@ -204,6 +215,7 @@ OPTIONS: --nounc Disable UNC (long path names) conversion on Windows. (default: false) [$NOUNC] --one-file-system, -x Don't cross filesystem boundaries (unix/macOS only). (default: false) [$ONE_FILE_SYSTEM] --skip-links Don't warn about skipped symlinks. (default: false) [$SKIP_LINKS] + --skip-specials Don't warn about skipped pipes, sockets and device objects. (default: false) [$SKIP_SPECIALS] --time-type value Set what kind of time is returned. (default: "mtime") [$TIME_TYPE] --unicode-normalization Apply unicode NFC normalization to paths and filenames. 
(default: false) [$UNICODE_NORMALIZATION] --zero-size-links Assume the Stat size of links is zero (and read them instead) (deprecated). (default: false) [$ZERO_SIZE_LINKS] diff --git a/docs/en/cli-reference/storage/create/mailru.md b/docs/en/cli-reference/storage/create/mailru.md index 21dc520bc..2a471497f 100644 --- a/docs/en/cli-reference/storage/create/mailru.md +++ b/docs/en/cli-reference/storage/create/mailru.md @@ -32,6 +32,13 @@ DESCRIPTION: Leave blank to use the provider defaults. + --client-credentials + Use client credentials OAuth flow. + + This will use the OAUTH2 client Credentials Flow as described in RFC 6749. + + Note that this option is NOT supported by all backends. + --user User name (usually email). @@ -130,6 +137,7 @@ OPTIONS: --auth-url value Auth server URL. [$AUTH_URL] --check-hash What should copy do if file checksum is mismatched or invalid. (default: true) [$CHECK_HASH] + --client-credentials Use client credentials OAuth flow. (default: false) [$CLIENT_CREDENTIALS] --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] --quirks value Comma separated list of internal maintenance flags. [$QUIRKS] diff --git a/docs/en/cli-reference/storage/create/mega.md b/docs/en/cli-reference/storage/create/mega.md index 1e6c87782..bcd9e4991 100644 --- a/docs/en/cli-reference/storage/create/mega.md +++ b/docs/en/cli-reference/storage/create/mega.md @@ -15,6 +15,15 @@ DESCRIPTION: --pass Password. + --2fa + The 2FA code of your MEGA account if the account is set up with one + + --session-id + Session (internal use only) + + --master-key + Master key (internal use only) + --debug Output more debug from Mega. @@ -47,6 +56,7 @@ DESCRIPTION: OPTIONS: + --2fa value The 2FA code of your MEGA account if the account is set up with one [$2FA] --help, -h show help --pass value Password. 
[$PASS] --user value User name. [$USER] @@ -57,6 +67,8 @@ OPTIONS: --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] --hard-delete Delete files permanently rather than putting them into the trash. (default: false) [$HARD_DELETE] + --master-key value Master key (internal use only) [$MASTER_KEY] + --session-id value Session (internal use only) [$SESSION_ID] --use-https Use HTTPS for transfers. (default: false) [$USE_HTTPS] Client Config diff --git a/docs/en/cli-reference/storage/create/onedrive.md b/docs/en/cli-reference/storage/create/onedrive.md index 85645aeeb..c6f071b42 100644 --- a/docs/en/cli-reference/storage/create/onedrive.md +++ b/docs/en/cli-reference/storage/create/onedrive.md @@ -32,15 +32,35 @@ DESCRIPTION: Leave blank to use the provider defaults. + --client-credentials + Use client credentials OAuth flow. + + This will use the OAUTH2 client Credentials Flow as described in RFC 6749. + + Note that this option is NOT supported by all backends. + --region Choose national cloud region for OneDrive. Examples: | global | Microsoft Cloud Global | us | Microsoft Cloud for US Government - | de | Microsoft Cloud Germany + | de | Microsoft Cloud Germany (deprecated - try global region first). | cn | Azure and Office 365 operated by Vnet Group in China + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + + This is disabled by default as uploading using single part uploads + causes rclone to use twice the storage on Onedrive business as when + rclone sets the modification time after the upload Onedrive creates a + new version. + + See: https://github.com/rclone/rclone/issues/1716 + + --chunk-size Chunk size to upload files with - must be multiple of 320k (327,680 bytes). 
@@ -74,6 +94,13 @@ DESCRIPTION: | Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All offline_access | Read and write access to all resources, without the ability to browse SharePoint sites. | | Same as if disable_site_permission was set to true + --tenant + ID of the service principal's tenant. Also called its directory ID. + + Set this if using + - Client Credential flow + + --disable-site-permission Disable the request for Sites.Read.All permission. @@ -221,7 +248,7 @@ DESCRIPTION: As a rule of thumb if nearly all of your data is under rclone's root directory (the `root/directory` in `onedrive:root/directory`) then - using this flag will be be a big performance win. If your data is + using this flag will be a big performance win. If your data is mostly not under the root then using this flag will be a big performance loss. @@ -257,6 +284,7 @@ OPTIONS: --client-secret value OAuth Client Secret. [$CLIENT_SECRET] --help, -h show help --region value Choose national cloud region for OneDrive. (default: "global") [$REGION] + --tenant value ID of the service principal's tenant. Also called its directory ID. [$TENANT] Advanced @@ -264,6 +292,7 @@ OPTIONS: --auth-url value Auth server URL. [$AUTH_URL] --av-override Allows download of files the server thinks has a virus. (default: false) [$AV_OVERRIDE] --chunk-size value Chunk size to upload files with - must be multiple of 320k (327,680 bytes). (default: "10Mi") [$CHUNK_SIZE] + --client-credentials Use client credentials OAuth flow. (default: false) [$CLIENT_CREDENTIALS] --delta If set rclone will use delta listing to implement recursive listings. (default: false) [$DELTA] --description value Description of the remote. [$DESCRIPTION] --disable-site-permission Disable the request for Sites.Read.All permission. (default: false) [$DISABLE_SITE_PERMISSION] @@ -283,6 +312,7 @@ OPTIONS: --server-side-across-configs Deprecated: use --server-side-across-configs instead. 
(default: false) [$SERVER_SIDE_ACROSS_CONFIGS] --token value OAuth Access Token as a JSON blob. [$TOKEN] --token-url value Token server url. [$TOKEN_URL] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "off") [$UPLOAD_CUTOFF] Client Config diff --git a/docs/en/cli-reference/storage/create/oos/env_auth.md b/docs/en/cli-reference/storage/create/oos/env_auth.md index 840e3bfdf..07c11b4d0 100644 --- a/docs/en/cli-reference/storage/create/oos/env_auth.md +++ b/docs/en/cli-reference/storage/create/oos/env_auth.md @@ -13,7 +13,9 @@ DESCRIPTION: Object storage namespace --compartment - Object storage compartment OCID + Specify compartment OCID, if you need to list buckets. + + List objects works without compartment OCID. --region Object storage Region @@ -188,7 +190,7 @@ DESCRIPTION: OPTIONS: - --compartment value Object storage compartment OCID [$COMPARTMENT] + --compartment value Specify compartment OCID, if you need to list buckets. [$COMPARTMENT] --endpoint value Endpoint for Object storage API. [$ENDPOINT] --help, -h show help --namespace value Object storage namespace [$NAMESPACE] diff --git a/docs/en/cli-reference/storage/create/oos/instance_principal_auth.md b/docs/en/cli-reference/storage/create/oos/instance_principal_auth.md index 88df806e7..94cd9e4c3 100644 --- a/docs/en/cli-reference/storage/create/oos/instance_principal_auth.md +++ b/docs/en/cli-reference/storage/create/oos/instance_principal_auth.md @@ -17,7 +17,9 @@ DESCRIPTION: Object storage namespace --compartment - Object storage compartment OCID + Specify compartment OCID, if you need to list buckets. + + List objects works without compartment OCID. --region Object storage Region @@ -192,7 +194,7 @@ DESCRIPTION: OPTIONS: - --compartment value Object storage compartment OCID [$COMPARTMENT] + --compartment value Specify compartment OCID, if you need to list buckets. [$COMPARTMENT] --endpoint value Endpoint for Object storage API. 
[$ENDPOINT] --help, -h show help --namespace value Object storage namespace [$NAMESPACE] diff --git a/docs/en/cli-reference/storage/create/oos/resource_principal_auth.md b/docs/en/cli-reference/storage/create/oos/resource_principal_auth.md index f9c0d5994..8d3af71bb 100644 --- a/docs/en/cli-reference/storage/create/oos/resource_principal_auth.md +++ b/docs/en/cli-reference/storage/create/oos/resource_principal_auth.md @@ -13,7 +13,9 @@ DESCRIPTION: Object storage namespace --compartment - Object storage compartment OCID + Specify compartment OCID, if you need to list buckets. + + List objects works without compartment OCID. --region Object storage Region @@ -188,7 +190,7 @@ DESCRIPTION: OPTIONS: - --compartment value Object storage compartment OCID [$COMPARTMENT] + --compartment value Specify compartment OCID, if you need to list buckets. [$COMPARTMENT] --endpoint value Endpoint for Object storage API. [$ENDPOINT] --help, -h show help --namespace value Object storage namespace [$NAMESPACE] diff --git a/docs/en/cli-reference/storage/create/oos/user_principal_auth.md b/docs/en/cli-reference/storage/create/oos/user_principal_auth.md index ff7103dfd..7b7670f17 100644 --- a/docs/en/cli-reference/storage/create/oos/user_principal_auth.md +++ b/docs/en/cli-reference/storage/create/oos/user_principal_auth.md @@ -17,7 +17,9 @@ DESCRIPTION: Object storage namespace --compartment - Object storage compartment OCID + Specify compartment OCID, if you need to list buckets. + + List objects works without compartment OCID. --region Object storage Region @@ -204,7 +206,7 @@ DESCRIPTION: OPTIONS: - --compartment value Object storage compartment OCID [$COMPARTMENT] + --compartment value Specify compartment OCID, if you need to list buckets. 
[$COMPARTMENT] --config-file value Path to OCI config file (default: "~/.oci/config") [$CONFIG_FILE] --config-profile value Profile name inside the oci config file (default: "Default") [$CONFIG_PROFILE] --endpoint value Endpoint for Object storage API. [$ENDPOINT] diff --git a/docs/en/cli-reference/storage/create/oos/workload_identity_auth.md b/docs/en/cli-reference/storage/create/oos/workload_identity_auth.md index a193b51a2..a08718f0d 100644 --- a/docs/en/cli-reference/storage/create/oos/workload_identity_auth.md +++ b/docs/en/cli-reference/storage/create/oos/workload_identity_auth.md @@ -15,7 +15,9 @@ DESCRIPTION: Object storage namespace --compartment - Object storage compartment OCID + Specify compartment OCID, if you need to list buckets. + + List objects works without compartment OCID. --region Object storage Region @@ -190,7 +192,7 @@ DESCRIPTION: OPTIONS: - --compartment value Object storage compartment OCID [$COMPARTMENT] + --compartment value Specify compartment OCID, if you need to list buckets. [$COMPARTMENT] --endpoint value Endpoint for Object storage API. [$ENDPOINT] --help, -h show help --namespace value Object storage namespace [$NAMESPACE] diff --git a/docs/en/cli-reference/storage/create/opendrive.md b/docs/en/cli-reference/storage/create/opendrive.md index 122ee75f9..4bf79be4e 100644 --- a/docs/en/cli-reference/storage/create/opendrive.md +++ b/docs/en/cli-reference/storage/create/opendrive.md @@ -26,6 +26,14 @@ DESCRIPTION: Note that these chunks are buffered in memory so increasing them will increase memory use. + --access + Files and folders will be uploaded with this access permission (default private) + + Examples: + | private | The file or folder access can be granted in a way that will allow select users to view, read or write what is absolutely essential for them. + | public | The file or folder can be downloaded by anyone from a web browser. 
The link can be shared in any way. + | hidden | The file or folder has the same access restrictions as Public, but can only be accessed by users who know the URL of the file or folder link. + --description Description of the remote. @@ -37,6 +45,7 @@ OPTIONS: Advanced + --access value Files and folders will be uploaded with this access permission (default private) (default: "private") [$ACCESS] --chunk-size value Files will be uploaded in chunks this size. (default: "10Mi") [$CHUNK_SIZE] --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot") [$ENCODING] diff --git a/docs/en/cli-reference/storage/create/pcloud.md b/docs/en/cli-reference/storage/create/pcloud.md index 8b362caae..f40f5b8dc 100644 --- a/docs/en/cli-reference/storage/create/pcloud.md +++ b/docs/en/cli-reference/storage/create/pcloud.md @@ -32,6 +32,13 @@ DESCRIPTION: + --client-credentials + Use client credentials OAuth flow. + + This will use the OAUTH2 client Credentials Flow as described in RFC 6749. + + Note that this option is NOT supported by all backends. + --encoding The encoding for the backend. @@ -74,6 +81,7 @@ OPTIONS: Advanced --auth-url value Auth server URL. [$AUTH_URL] + --client-credentials Use client credentials OAuth flow. (default: false) [$CLIENT_CREDENTIALS] --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] --hostname value Hostname to connect to.
(default: "api.pcloud.com") [$HOSTNAME] diff --git a/docs/en/cli-reference/storage/create/premiumizeme.md b/docs/en/cli-reference/storage/create/premiumizeme.md index 2f634dd43..7155e356e 100644 --- a/docs/en/cli-reference/storage/create/premiumizeme.md +++ b/docs/en/cli-reference/storage/create/premiumizeme.md @@ -32,6 +32,13 @@ DESCRIPTION: Leave blank to use the provider defaults. + --client-credentials + Use client credentials OAuth flow. + + This will use the OAUTH2 client Credentials Flow as described in RFC 6749. + + Note that this option is NOT supported by all backends. + --api-key API Key. @@ -55,11 +62,12 @@ OPTIONS: Advanced - --auth-url value Auth server URL. [$AUTH_URL] - --description value Description of the remote. [$DESCRIPTION] - --encoding value The encoding for the backend. (default: "Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] - --token value OAuth Access Token as a JSON blob. [$TOKEN] - --token-url value Token server url. [$TOKEN_URL] + --auth-url value Auth server URL. [$AUTH_URL] + --client-credentials Use client credentials OAuth flow. (default: false) [$CLIENT_CREDENTIALS] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] + --token value OAuth Access Token as a JSON blob. [$TOKEN] + --token-url value Token server url. [$TOKEN_URL] Client Config diff --git a/docs/en/cli-reference/storage/create/putio.md b/docs/en/cli-reference/storage/create/putio.md index 1e90b2492..1245ee0b2 100644 --- a/docs/en/cli-reference/storage/create/putio.md +++ b/docs/en/cli-reference/storage/create/putio.md @@ -32,6 +32,13 @@ DESCRIPTION: Leave blank to use the provider defaults. + --client-credentials + Use client credentials OAuth flow. + + This will use the OAUTH2 client Credentials Flow as described in RFC 6749. + + Note that this option is NOT supported by all backends. 
+ --encoding The encoding for the backend. @@ -48,11 +55,12 @@ OPTIONS: Advanced - --auth-url value Auth server URL. [$AUTH_URL] - --description value Description of the remote. [$DESCRIPTION] - --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] - --token value OAuth Access Token as a JSON blob. [$TOKEN] - --token-url value Token server url. [$TOKEN_URL] + --auth-url value Auth server URL. [$AUTH_URL] + --client-credentials Use client credentials OAuth flow. (default: false) [$CLIENT_CREDENTIALS] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] + --token value OAuth Access Token as a JSON blob. [$TOKEN] + --token-url value Token server url. [$TOKEN_URL] Client Config diff --git a/docs/en/cli-reference/storage/create/s3/README.md b/docs/en/cli-reference/storage/create/s3/README.md index d49063755..a1206e19d 100644 --- a/docs/en/cli-reference/storage/create/s3/README.md +++ b/docs/en/cli-reference/storage/create/s3/README.md @@ -1,9 +1,9 @@ -# Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, ChinaMobile, Cloudflare, DigitalOcean, Dreamhost, GCS, HuaweiOBS, IBMCOS, IDrive, IONOS, LyveCloud, Leviia, Liara, Linode, Magalu, Minio, Netease, Petabox, RackCorp, Rclone, Scaleway, SeaweedFS, StackPath, Storj, Synology, TencentCOS, Wasabi, Qiniu and others +# Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, BizflyCloud, Ceph, ChinaMobile, Cloudflare, Cubbit, DigitalOcean, Dreamhost, Exaba, FileLu, FlashBlade, GCS, Hetzner, HuaweiOBS, IBMCOS, IDrive, Intercolo, IONOS, Leviia, Liara, Linode, LyveCloud, Magalu, Mega, Minio, Netease, Outscale, OVHcloud, Petabox, Qiniu, Rabata, RackCorp, Rclone, Scaleway, SeaweedFS, Selectel, Servercore, SpectraLogic, StackPath, Storj, Synology, TencentCOS, Wasabi, Zata, Other {% code fullWidth="true" %} 
``` NAME: - singularity storage create s3 - Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, ChinaMobile, Cloudflare, DigitalOcean, Dreamhost, GCS, HuaweiOBS, IBMCOS, IDrive, IONOS, LyveCloud, Leviia, Liara, Linode, Magalu, Minio, Netease, Petabox, RackCorp, Rclone, Scaleway, SeaweedFS, StackPath, Storj, Synology, TencentCOS, Wasabi, Qiniu and others + singularity storage create s3 - Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, BizflyCloud, Ceph, ChinaMobile, Cloudflare, Cubbit, DigitalOcean, Dreamhost, Exaba, FileLu, FlashBlade, GCS, Hetzner, HuaweiOBS, IBMCOS, IDrive, Intercolo, IONOS, Leviia, Liara, Linode, LyveCloud, Magalu, Mega, Minio, Netease, Outscale, OVHcloud, Petabox, Qiniu, Rabata, RackCorp, Rclone, Scaleway, SeaweedFS, Selectel, Servercore, SpectraLogic, StackPath, Storj, Synology, TencentCOS, Wasabi, Zata, Other USAGE: singularity storage create s3 command [command options] @@ -12,35 +12,50 @@ COMMANDS: aws Amazon Web Services (AWS) S3 alibaba Alibaba Cloud Object Storage System (OSS) formerly Aliyun arvancloud Arvan Cloud Object Storage (AOS) + bizflycloud Bizfly Cloud Simple Storage ceph Ceph Object Storage chinamobile China Mobile Ecloud Elastic Object Storage (EOS) cloudflare Cloudflare R2 Storage + cubbit Cubbit DS3 Object Storage digitalocean DigitalOcean Spaces dreamhost Dreamhost DreamObjects + exaba Exaba Object Storage + filelu FileLu S5 (S3-Compatible Object Storage) + flashblade Pure Storage FlashBlade Object Storage gcs Google Cloud Storage + hetzner Hetzner Object Storage huaweiobs Huawei Object Storage Service ibmcos IBM COS S3 idrive IDrive e2 ionos IONOS Cloud + intercolo Intercolo Object Storage leviia Leviia Object Storage liara Liara Object Storage linode Linode Object Storage lyvecloud Seagate Lyve Cloud magalu Magalu Object Storage + mega MEGA S4 Object Storage minio Minio Object Storage netease Netease Object Storage (NOS) + ovhcloud OVHcloud Object Storage other Any 
other S3 compatible provider + outscale OUTSCALE Object Storage (OOS) petabox Petabox Object Storage qiniu Qiniu Object Storage (Kodo) + rabata Rabata Cloud Storage rackcorp RackCorp Object Storage rclone Rclone S3 Server scaleway Scaleway Object Storage seaweedfs SeaweedFS S3 + selectel Selectel Object Storage + servercore Servercore Object Storage + spectralogic Spectra Logic Black Pearl stackpath StackPath Object Storage storj Storj (S3 Compatible Gateway) synology Synology C2 Object Storage tencentcos Tencent Cloud Object Storage (COS) wasabi Wasabi Object Storage + zata Zata (S3 compatible Gateway) help, h Shows a list of commands or help for one command OPTIONS: diff --git a/docs/en/cli-reference/storage/create/s3/alibaba.md b/docs/en/cli-reference/storage/create/s3/alibaba.md index 4e3dc7715..186734e59 100644 --- a/docs/en/cli-reference/storage/create/s3/alibaba.md +++ b/docs/en/cli-reference/storage/create/s3/alibaba.md @@ -29,7 +29,9 @@ DESCRIPTION: Leave blank for anonymous access or runtime credentials. --endpoint - Endpoint for OSS API. + Endpoint for S3 API. + + Required when using an S3 clone. Examples: | oss-accelerate.aliyuncs.com | Global Accelerate @@ -96,13 +98,7 @@ DESCRIPTION: | | The AuthenticatedUsers group gets READ access. --storage-class - The storage class to use when storing new objects in OSS. - - Examples: - | | Default - | STANDARD | Standard storage class - | GLACIER | Archive storage mode - | STANDARD_IA | Infrequent access storage mode + The storage class to use when storing new objects in S3. --upload-cutoff Cutoff for switching to chunked upload. @@ -195,6 +191,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. 
+ + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -234,6 +250,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -374,6 +393,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -386,7 +410,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -488,6 +512,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. 
+ + --sdk-log-mode Set to debug the SDK @@ -514,11 +562,11 @@ DESCRIPTION: OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] - --endpoint value Endpoint for OSS API. [$ENDPOINT] + --endpoint value Endpoint for S3 API. [$ENDPOINT] --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] --help, -h show help --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] - --storage-class value The storage class to use when storing new objects in OSS. [$STORAGE_CLASS] + --storage-class value The storage class to use when storing new objects in S3. [$STORAGE_CLASS] Advanced @@ -545,18 +593,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. 
(default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/create/s3/arvancloud.md b/docs/en/cli-reference/storage/create/s3/arvancloud.md index 06077910b..d41a141af 100644 --- a/docs/en/cli-reference/storage/create/s3/arvancloud.md +++ b/docs/en/cli-reference/storage/create/s3/arvancloud.md @@ -29,7 +29,9 @@ DESCRIPTION: Leave blank for anonymous access or runtime credentials. --endpoint - Endpoint for Arvan Cloud Object Storage (AOS) API. 
+ Endpoint for S3 API. + + Required when using an S3 clone. Examples: | s3.ir-thr-at1.arvanstorage.ir | The default endpoint - a good choice if you are unsure. @@ -37,9 +39,9 @@ DESCRIPTION: | s3.ir-tbz-sh1.arvanstorage.ir | Tabriz Iran (Shahriar) --location-constraint - Location constraint - must match endpoint. + Location constraint - must be set to match the Region. - Used when creating buckets only. + Leave blank if not sure. Used when creating buckets only. Examples: | ir-thr-at1 | Tehran Iran (Simin) @@ -83,10 +85,7 @@ DESCRIPTION: | | The AuthenticatedUsers group gets READ access. --storage-class - The storage class to use when storing new objects in ArvanCloud. - - Examples: - | STANDARD | Standard storage class + The storage class to use when storing new objects in S3. --upload-cutoff Cutoff for switching to chunked upload. @@ -179,6 +178,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -218,6 +237,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -358,6 +380,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. 
+ + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -370,7 +397,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -472,6 +499,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -498,12 +549,12 @@ DESCRIPTION: OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] - --endpoint value Endpoint for Arvan Cloud Object Storage (AOS) API. [$ENDPOINT] + --endpoint value Endpoint for S3 API. [$ENDPOINT] --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] --help, -h show help - --location-constraint value Location constraint - must match endpoint. [$LOCATION_CONSTRAINT] + --location-constraint value Location constraint - must be set to match the Region. 
[$LOCATION_CONSTRAINT] --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] - --storage-class value The storage class to use when storing new objects in ArvanCloud. [$STORAGE_CLASS] + --storage-class value The storage class to use when storing new objects in S3. [$STORAGE_CLASS] Advanced @@ -530,18 +581,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. 
(default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/create/s3/aws.md b/docs/en/cli-reference/storage/create/s3/aws.md index 2a8a21206..75d6a33df 100644 --- a/docs/en/cli-reference/storage/create/s3/aws.md +++ b/docs/en/cli-reference/storage/create/s3/aws.md @@ -30,6 +30,8 @@ DESCRIPTION: --region Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. Examples: | us-east-1 | The default endpoint - a good choice if you are unsure. @@ -89,12 +91,12 @@ DESCRIPTION: --endpoint Endpoint for S3 API. - Leave blank if using AWS to use the default endpoint for the region. + Required when using an S3 clone. --location-constraint Location constraint - must be set to match the Region. - Used when creating buckets only. + Leave blank if not sure. Used when creating buckets only. 
Examples: | | Empty for US Region, Northern Virginia, or Pacific Northwest @@ -167,10 +169,6 @@ DESCRIPTION: --server-side-encryption The server-side encryption algorithm used when storing this object in S3. - Examples: - | | None - | AES256 | AES256 - --sse-customer-algorithm If using SSE-C, the server-side encryption algorithm used when storing this object in S3. @@ -214,15 +212,12 @@ DESCRIPTION: The storage class to use when storing new objects in S3. Examples: - | | Default - | STANDARD | Standard storage class | REDUCED_REDUNDANCY | Reduced redundancy storage class | STANDARD_IA | Standard Infrequent Access storage class | ONEZONE_IA | One Zone Infrequent Access storage class - | GLACIER | Glacier storage class + | GLACIER | Glacier Flexible Retrieval storage class | DEEP_ARCHIVE | Glacier Deep Archive storage class | INTELLIGENT_TIERING | Intelligent-Tiering storage class - | GLACIER_IR | Glacier Instant Retrieval storage class --upload-cutoff Cutoff for switching to chunked upload. @@ -315,6 +310,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -359,6 +374,9 @@ DESCRIPTION: See: [AWS S3 Transfer acceleration](https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration-examples.html) + --use-arn-region + If true, enables arn region support for the service. + --leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. 
@@ -507,6 +525,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -519,7 +542,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -626,6 +649,56 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --directory-bucket + Set to use AWS Directory Buckets + + If you are using an AWS Directory Bucket then set this flag. + + This will ensure no `Content-Md5` headers are sent and ensure `ETag` + headers are not interpreted as MD5 sums. `X-Amz-Meta-Md5chksum` will + be set on all objects whether single or multipart uploaded. + + This also sets `no_check_bucket = true`. 
+ + Note that Directory Buckets do not support: + + - Versioning + - `Content-Encoding: gzip` + + Rclone limitations with Directory Buckets: + + - rclone does not support creating Directory Buckets with `rclone mkdir` + - ... or removing them with `rclone rmdir` yet + - Directory Buckets do not appear when doing `rclone lsf` at the top level. + - Rclone can't remove auto created directories yet. In theory this should + work with `directory_markers = true` but it doesn't. + - Directories don't seem to appear in recursive (ListR) listings. + + --sdk-log-mode Set to debug the SDK @@ -669,6 +742,7 @@ OPTIONS: --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] --description value Description of the remote. [$DESCRIPTION] + --directory-bucket Set to use AWS Directory Buckets (default: false) [$DIRECTORY_BUCKET] --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] @@ -689,9 +763,14 @@ OPTIONS: --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] --requester-pays Enables requester pays option when interacting with S3 bucket. (default: false) [$REQUESTER_PAYS] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. 
[$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --sse-customer-algorithm value If using SSE-C, the server-side encryption algorithm used when storing this object in S3. [$SSE_CUSTOMER_ALGORITHM] --sse-customer-key value To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY] --sse-customer-key-base64 value If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY_BASE64] @@ -702,11 +781,14 @@ OPTIONS: --use-accelerate-endpoint If true use the AWS S3 accelerated endpoint. (default: false) [$USE_ACCELERATE_ENDPOINT] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. 
(default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/create/s3/bizflycloud.md b/docs/en/cli-reference/storage/create/s3/bizflycloud.md new file mode 100644 index 000000000..73693a8c2 --- /dev/null +++ b/docs/en/cli-reference/storage/create/s3/bizflycloud.md @@ -0,0 +1,634 @@ +# Bizfly Cloud Simple Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage create s3 bizflycloud - Bizfly Cloud Simple Storage + +USAGE: + singularity storage create s3 bizflycloud [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --region + Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. + + Examples: + | hn | Ha Noi + | hcm | Ho Chi Minh + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. 
+ + Examples: + | hn.ss.bfcplatform.vn | Hanoi endpoint + | hcm.ss.bfcplatform.vn | Ho Chi Minh endpoint + + --acl + Canned ACL used when creating buckets and storing or copying objects. + + This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when server-side copying objects as S3 + doesn't copy the ACL from the source but rather writes a fresh one. + + If the acl is an empty string then no X-Amz-Acl: header is added and + the default (private) will be used. + + + --bucket-acl + Canned ACL used when creating buckets. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. + | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. 
+ + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. 
+ + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. 
AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --use-arn-region + If true, enables arn region support for the service. + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. 
If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If an source object of unknown length is uploaded then rclone **will** do a + HEAD request. + + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. 
+ + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + + --versions + Include old versions in directory listings. 
+ + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-options) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. 
+ + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --region value Region to connect to. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. 
(default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. 
(default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). 
(default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. 
key=value) + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) + + General + + --name value Name of the storage (default: Auto generated) + --path value Path of the storage + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/create/s3/ceph.md b/docs/en/cli-reference/storage/create/s3/ceph.md index c3d3e7851..e68e0b3e3 100644 --- a/docs/en/cli-reference/storage/create/s3/ceph.md +++ b/docs/en/cli-reference/storage/create/s3/ceph.md @@ -33,12 +33,6 @@ DESCRIPTION: Leave blank if you are using an S3 clone and you don't have a region. - Examples: - | | Use this if unsure. - | | Will use v4 signatures and an empty region. - | other-v2-signature | Use this only if v4 signatures don't work. - | | E.g. pre Jewel/v10 CEPH. - --endpoint Endpoint for S3 API. 
@@ -89,10 +83,6 @@ DESCRIPTION: --server-side-encryption The server-side encryption algorithm used when storing this object in S3. - Examples: - | | None - | AES256 | AES256 - --sse-customer-algorithm If using SSE-C, the server-side encryption algorithm used when storing this object in S3. @@ -223,6 +213,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -262,6 +272,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -402,6 +415,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -414,7 +432,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -516,6 +534,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. 
+ + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -576,9 +618,14 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --sse-customer-algorithm value If using SSE-C, the server-side encryption algorithm used when storing this object in S3. [$SSE_CUSTOMER_ALGORITHM] --sse-customer-key value To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. 
[$SSE_CUSTOMER_KEY] --sse-customer-key-base64 value If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY_BASE64] @@ -587,11 +634,14 @@ OPTIONS: --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. 
(default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/create/s3/chinamobile.md b/docs/en/cli-reference/storage/create/s3/chinamobile.md index eb477b958..db9c07f3c 100644 --- a/docs/en/cli-reference/storage/create/s3/chinamobile.md +++ b/docs/en/cli-reference/storage/create/s3/chinamobile.md @@ -29,7 +29,9 @@ DESCRIPTION: Leave blank for anonymous access or runtime credentials. --endpoint - Endpoint for China Mobile Ecloud Elastic Object Storage (EOS) API. + Endpoint for S3 API. + + Required when using an S3 clone. Examples: | eos-wuxi-1.cmecloud.cn | The default endpoint - a good choice if you are unsure. @@ -65,9 +67,9 @@ DESCRIPTION: | eos-anhui-1.cmecloud.cn | Anhui China (Huainan) --location-constraint - Location constraint - must match endpoint. + Location constraint - must be set to match the Region. - Used when creating buckets only. + Leave blank if not sure. Used when creating buckets only. Examples: | wuxi1 | East China (Suzhou) @@ -86,7 +88,7 @@ DESCRIPTION: | chengdu1 | Southwest China (Chengdu) | chongqing1 | Southwest China (Chongqing) | guiyang1 | Southwest China (Guiyang) - | xian1 | Nouthwest China (Xian) + | xian1 | Northwest China (Xian) | yunnan | Yunnan China (Kunming) | yunnan2 | Yunnan China (Kunming-2) | tianjin1 | Tianjin China (Tianjin) @@ -141,10 +143,6 @@ DESCRIPTION: --server-side-encryption The server-side encryption algorithm used when storing this object in S3. - Examples: - | | None - | AES256 | AES256 - --sse-customer-algorithm If using SSE-C, the server-side encryption algorithm used when storing this object in S3. @@ -178,13 +176,7 @@ DESCRIPTION: | | None --storage-class - The storage class to use when storing new objects in ChinaMobile. - - Examples: - | | Default - | STANDARD | Standard storage class - | GLACIER | Archive storage mode - | STANDARD_IA | Infrequent access storage mode + The storage class to use when storing new objects in S3. --upload-cutoff Cutoff for switching to chunked upload. 
@@ -277,6 +269,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -316,6 +328,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -456,6 +471,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -468,7 +488,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -570,6 +590,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. 
+ + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -596,13 +640,13 @@ DESCRIPTION: OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] - --endpoint value Endpoint for China Mobile Ecloud Elastic Object Storage (EOS) API. [$ENDPOINT] + --endpoint value Endpoint for S3 API. [$ENDPOINT] --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] --help, -h show help - --location-constraint value Location constraint - must match endpoint. [$LOCATION_CONSTRAINT] + --location-constraint value Location constraint - must be set to match the Region. [$LOCATION_CONSTRAINT] --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] --server-side-encryption value The server-side encryption algorithm used when storing this object in S3. [$SERVER_SIDE_ENCRYPTION] - --storage-class value The storage class to use when storing new objects in ChinaMobile. [$STORAGE_CLASS] + --storage-class value The storage class to use when storing new objects in S3. [$STORAGE_CLASS] Advanced @@ -629,9 +673,14 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. 
[$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --sse-customer-algorithm value If using SSE-C, the server-side encryption algorithm used when storing this object in S3. [$SSE_CUSTOMER_ALGORITHM] --sse-customer-key value To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY] --sse-customer-key-base64 value If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY_BASE64] @@ -640,11 +689,14 @@ OPTIONS: --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. 
(default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/create/s3/cloudflare.md b/docs/en/cli-reference/storage/create/s3/cloudflare.md index 0575fe6dc..42f9e5fd5 100644 --- a/docs/en/cli-reference/storage/create/s3/cloudflare.md +++ b/docs/en/cli-reference/storage/create/s3/cloudflare.md @@ -30,6 +30,8 @@ DESCRIPTION: --region Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. Examples: | auto | R2 buckets are automatically distributed across Cloudflare's data centers for low latency. @@ -39,29 +41,6 @@ DESCRIPTION: Required when using an S3 clone. - --bucket-acl - Canned ACL used when creating buckets. - - For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl - - Note that this ACL is applied when only when creating buckets. If it - isn't set then "acl" is used instead. - - If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: - header is added and the default (private) will be used. - - - Examples: - | private | Owner gets FULL_CONTROL. - | | No one else has access rights (default). - | public-read | Owner gets FULL_CONTROL. - | | The AllUsers group gets READ access. - | public-read-write | Owner gets FULL_CONTROL. - | | The AllUsers group gets READ and WRITE access. 
- | | Granting this on a bucket is generally not recommended. - | authenticated-read | Owner gets FULL_CONTROL. - | | The AuthenticatedUsers group gets READ access. - --upload-cutoff Cutoff for switching to chunked upload. @@ -153,6 +132,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -192,6 +191,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -332,6 +334,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -344,7 +351,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -446,6 +453,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. 
+ + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -479,7 +510,6 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] @@ -502,18 +532,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. 
(default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. 
(default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/create/s3/cubbit.md b/docs/en/cli-reference/storage/create/s3/cubbit.md new file mode 100644 index 000000000..785570741 --- /dev/null +++ b/docs/en/cli-reference/storage/create/s3/cubbit.md @@ -0,0 +1,632 @@ +# Cubbit DS3 Object Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage create s3 cubbit - Cubbit DS3 Object Storage + +USAGE: + singularity storage create s3 cubbit [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --region + Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. + + Examples: + | eu-west-1 | Europe West + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. + + Examples: + | s3.cubbit.eu | Cubbit DS3 Object Storage endpoint + + --acl + Canned ACL used when creating buckets and storing or copying objects. + + This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when server-side copying objects as S3 + doesn't copy the ACL from the source but rather writes a fresh one. + + If the acl is an empty string then no X-Amz-Acl: header is added and + the default (private) will be used. + + + --bucket-acl + Canned ACL used when creating buckets. 
+ + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. + | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. 
+ + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. 
+ + + --session-token + An AWS session token. + + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). 
+ + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --use-arn-region + If true, enables arn region support for the service. + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. 
+
+
+   --no-head
+      If set, don't HEAD uploaded objects to check integrity.
+
+      This can be useful when trying to minimise the number of transactions
+      rclone does.
+
+      Setting it means that if rclone receives a 200 OK message after
+      uploading an object with PUT then it will assume that it got uploaded
+      properly.
+
+      In particular it will assume:
+
+      - the metadata, including modtime, storage class and content type was as uploaded
+      - the size was as uploaded
+
+      It reads the following items from the response for a single part PUT:
+
+      - the MD5SUM
+      - The uploaded date
+
+      For multipart uploads these items aren't read.
+
+      If a source object of unknown length is uploaded then rclone **will** do a
+      HEAD request.
+
+      Setting this flag increases the chance for undetected upload failures,
+      in particular an incorrect size, so it isn't recommended for normal
+      operation. In practice the chance of an undetected upload failure is
+      very small even with this flag.
+
+
+   --no-head-object
+      If set, do not do HEAD before GET when getting objects.
+
+   --encoding
+      The encoding for the backend.
+
+      See the [encoding section in the overview](/overview/#encoding) for more info.
+
+   --memory-pool-flush-time
+      How often internal memory buffer pools will be flushed. (no longer used)
+
+   --memory-pool-use-mmap
+      Whether to use mmap buffers in internal memory pool. (no longer used)
+
+   --disable-http2
+      Disable usage of http2 for S3 backends.
+
+      There is currently an unsolved issue with the s3 (specifically minio) backend
+      and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be
+      disabled here. When the issue is solved this flag will be removed.
+
+      See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631
+
+
+
+   --download-url
+      Custom endpoint for downloads.
+      This is usually set to a CloudFront CDN URL as AWS S3 offers
+      cheaper egress for data downloaded through the CloudFront network.
+ + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-options) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. 
These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. 
+ + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. 
+ + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --region value Region to connect to. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. 
(default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. 
[$ROLE_SESSION_NAME] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] + --v2-auth If true use v2 authentication. 
(default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value) + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) + + General + + --name value Name of the storage (default: Auto generated) + --path value Path of the storage + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + 
--client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/create/s3/digitalocean.md b/docs/en/cli-reference/storage/create/s3/digitalocean.md index cc84b3c88..814080346 100644 --- a/docs/en/cli-reference/storage/create/s3/digitalocean.md +++ b/docs/en/cli-reference/storage/create/s3/digitalocean.md @@ -33,12 +33,6 @@ DESCRIPTION: Leave blank if you are using an S3 clone and you don't have a region. - Examples: - | | Use this if unsure. - | | Will use v4 signatures and an empty region. - | other-v2-signature | Use this only if v4 signatures don't work. - | | E.g. pre Jewel/v10 CEPH. - --endpoint Endpoint for S3 API. @@ -47,10 +41,14 @@ DESCRIPTION: Examples: | syd1.digitaloceanspaces.com | DigitalOcean Spaces Sydney 1 | sfo3.digitaloceanspaces.com | DigitalOcean Spaces San Francisco 3 + | sfo2.digitaloceanspaces.com | DigitalOcean Spaces San Francisco 2 | fra1.digitaloceanspaces.com | DigitalOcean Spaces Frankfurt 1 | nyc3.digitaloceanspaces.com | DigitalOcean Spaces New York 3 | ams3.digitaloceanspaces.com | DigitalOcean Spaces Amsterdam 3 | sgp1.digitaloceanspaces.com | DigitalOcean Spaces Singapore 1 + | lon1.digitaloceanspaces.com | DigitalOcean Spaces London 1 + | tor1.digitaloceanspaces.com | DigitalOcean Spaces Toronto 1 + | blr1.digitaloceanspaces.com | DigitalOcean Spaces Bangalore 1 --location-constraint Location constraint - must be set to match the Region. @@ -185,6 +183,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. 
+ --upload-concurrency Concurrency for multipart uploads and copies. @@ -224,6 +242,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -364,6 +385,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -376,7 +402,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -478,6 +504,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -536,18 +586,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. 
(default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. 
(default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/create/s3/dreamhost.md b/docs/en/cli-reference/storage/create/s3/dreamhost.md index e7913d4f7..d2474621e 100644 --- a/docs/en/cli-reference/storage/create/s3/dreamhost.md +++ b/docs/en/cli-reference/storage/create/s3/dreamhost.md @@ -33,12 +33,6 @@ DESCRIPTION: Leave blank if you are using an S3 clone and you don't have a region. - Examples: - | | Use this if unsure. - | | Will use v4 signatures and an empty region. - | other-v2-signature | Use this only if v4 signatures don't work. - | | E.g. pre Jewel/v10 CEPH. - --endpoint Endpoint for S3 API. @@ -180,6 +174,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. 
@@ -219,6 +233,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -359,6 +376,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -371,7 +393,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -473,6 +495,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -531,18 +577,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. 
(default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. 
(default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/create/s3/exaba.md b/docs/en/cli-reference/storage/create/s3/exaba.md new file mode 100644 index 000000000..40420bd17 --- /dev/null +++ b/docs/en/cli-reference/storage/create/s3/exaba.md @@ -0,0 +1,632 @@ +# Exaba Object Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage create s3 exaba - Exaba Object Storage + +USAGE: + singularity storage create s3 exaba [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --region + Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. + + --location-constraint + Location constraint - must be set to match the Region. + + Leave blank if not sure. Used when creating buckets only. 
+ + --acl + Canned ACL used when creating buckets and storing or copying objects. + + This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when server-side copying objects as S3 + doesn't copy the ACL from the source but rather writes a fresh one. + + If the acl is an empty string then no X-Amz-Acl: header is added and + the default (private) will be used. + + + --bucket-acl + Canned ACL used when creating buckets. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. + | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer.
+ + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. 
+ + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. 
AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --use-arn-region + If true, enables arn region support for the service. + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. 
If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If a source object of unknown length is uploaded then rclone **will** do a + HEAD request. + + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends.
+ + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + + --versions + Include old versions in directory listings. 
+ + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-options) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. 
+ + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --location-constraint value Location constraint - must be set to match the Region. [$LOCATION_CONSTRAINT] + --region value Region to connect to. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. 
[$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. 
(default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). 
(default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. 
key=value) + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) + + General + + --name value Name of the storage (default: Auto generated) + --path value Path of the storage + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/create/s3/filelu.md b/docs/en/cli-reference/storage/create/s3/filelu.md new file mode 100644 index 000000000..ad17de35d --- /dev/null +++ b/docs/en/cli-reference/storage/create/s3/filelu.md @@ -0,0 +1,640 @@ +# FileLu S5 (S3-Compatible Object Storage) + +{% code fullWidth="true" %} +``` +NAME: + singularity storage create s3 filelu - FileLu S5 (S3-Compatible Object Storage) + +USAGE: + singularity storage create s3 filelu [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). 
+ + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --region + Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. + + Examples: + | global | Global + | us-east | North America (US-East) + | eu-central | Europe (EU-Central) + | ap-southeast | Asia Pacific (AP-Southeast) + | me-central | Middle East (ME-Central) + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. + + Examples: + | s5lu.com | Global FileLu S5 endpoint + | us.s5lu.com | North America (US-East) region endpoint + | eu.s5lu.com | Europe (EU-Central) region endpoint + | ap.s5lu.com | Asia Pacific (AP-Southeast) region endpoint + | me.s5lu.com | Middle East (ME-Central) region endpoint + + --acl + Canned ACL used when creating buckets and storing or copying objects. + + This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when server-side copying objects as S3 + doesn't copy the ACL from the source but rather writes a fresh one. + + If the acl is an empty string then no X-Amz-Acl: header is added and + the default (private) will be used. + + + --bucket-acl + Canned ACL used when creating buckets. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied only when creating buckets. If it + isn't set then "acl" is used instead.
+ + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. + | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. 
+ A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. 
+ + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --use-arn-region + If true, enables arn region support for the service. + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). 
+ + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. 
+
+ In particular it will assume:
+
+ - the metadata, including modtime, storage class and content type was as uploaded
+ - the size was as uploaded
+
+ It reads the following items from the response for a single part PUT:
+
+ - the MD5SUM
+ - The uploaded date
+
+ For multipart uploads these items aren't read.
+
+ If a source object of unknown length is uploaded then rclone **will** do a
+ HEAD request.
+
+ Setting this flag increases the chance for undetected upload failures,
+ in particular an incorrect size, so it isn't recommended for normal
+ operation. In practice the chance of an undetected upload failure is
+ very small even with this flag.
+
+
+ --no-head-object
+ If set, do not do HEAD before GET when getting objects.
+
+ --encoding
+ The encoding for the backend.
+
+ See the [encoding section in the overview](/overview/#encoding) for more info.
+
+ --memory-pool-flush-time
+ How often internal memory buffer pools will be flushed. (no longer used)
+
+ --memory-pool-use-mmap
+ Whether to use mmap buffers in internal memory pool. (no longer used)
+
+ --disable-http2
+ Disable usage of http2 for S3 backends.
+
+ There is currently an unsolved issue with the s3 (specifically minio) backend
+ and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be
+ disabled here. When the issue is solved this flag will be removed.
+
+ See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631
+
+
+
+ --download-url
+ Custom endpoint for downloads.
+ This is usually set to a CloudFront CDN URL as AWS S3 offers
+ cheaper egress for data downloaded through the CloudFront network.
+
+ --directory-markers
+ Upload an empty object with a trailing slash when a new directory is created
+
+ Empty folders are unsupported for bucket based remotes, this option creates an empty
+ object ending with "/", to persist the folder.
+ + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-options) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. 
+ + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. 
+ + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --region value Region to connect to. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. 
(default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. 
[$SESSION_TOKEN]
+ --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE]
+ --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING]
+ --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY]
+ --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF]
+ --use-accept-encoding-gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP]
+ --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS]
+ --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION]
+ --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS]
+ --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK]
+ --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG]
+ --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS]
+ --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST]
+ --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD]
+ --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID]
+ --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH]
+ --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT]
+ --version-deleted Show deleted file markers when using versions.
(default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value) + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) + + General + + --name value Name of the storage (default: Auto generated) + --path value Path of the storage + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/create/s3/flashblade.md 
b/docs/en/cli-reference/storage/create/s3/flashblade.md new file mode 100644 index 000000000..bafa6ad2a --- /dev/null +++ b/docs/en/cli-reference/storage/create/s3/flashblade.md @@ -0,0 +1,581 @@ +# Pure Storage FlashBlade Object Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage create s3 flashblade - Pure Storage FlashBlade Object Storage + +USAGE: + singularity storage create s3 flashblade [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. 
Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. 
+ + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. 
+ + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --use-arn-region + If true, enables arn region support for the service. + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. 
Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If an source object of unknown length is uploaded then rclone **will** do a + HEAD request. + + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. 
+ This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-options) for valid formats. 
+ + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. 
+ + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. 
(default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. 
[$ROLE_SESSION_NAME] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] + --v2-auth If true use v2 authentication. 
(default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value) + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) + + General + + --name value Name of the storage (default: Auto generated) + --path value Path of the storage + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + 
--client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/create/s3/gcs.md b/docs/en/cli-reference/storage/create/s3/gcs.md index 4a819c7bc..7c44b1726 100644 --- a/docs/en/cli-reference/storage/create/s3/gcs.md +++ b/docs/en/cli-reference/storage/create/s3/gcs.md @@ -33,14 +33,10 @@ DESCRIPTION: Leave blank if you are using an S3 clone and you don't have a region. - Examples: - | | Use this if unsure. - | | Will use v4 signatures and an empty region. - | other-v2-signature | Use this only if v4 signatures don't work. - | | E.g. pre Jewel/v10 CEPH. - --endpoint - Endpoint for Google Cloud Storage. + Endpoint for S3 API. + + Required when using an S3 clone. Examples: | https://storage.googleapis.com | Google Cloud Storage endpoint @@ -178,6 +174,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -217,6 +233,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -357,6 +376,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. 
+ + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -369,7 +393,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -471,6 +495,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -497,7 +545,7 @@ DESCRIPTION: OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] - --endpoint value Endpoint for Google Cloud Storage. [$ENDPOINT] + --endpoint value Endpoint for S3 API. [$ENDPOINT] --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] --help, -h show help --location-constraint value Location constraint - must be set to match the Region. [$LOCATION_CONSTRAINT] @@ -529,18 +577,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. 
(default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. 
(default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/create/s3/hetzner.md b/docs/en/cli-reference/storage/create/s3/hetzner.md new file mode 100644 index 000000000..1e131ee1c --- /dev/null +++ b/docs/en/cli-reference/storage/create/s3/hetzner.md @@ -0,0 +1,642 @@ +# Hetzner Object Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage create s3 hetzner - Hetzner Object Storage + +USAGE: + singularity storage create s3 hetzner [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --region + Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. + + Examples: + | hel1 | Helsinki + | fsn1 | Falkenstein + | nbg1 | Nuremberg + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. 
+ + Examples: + | hel1.your-objectstorage.com | Helsinki + | fsn1.your-objectstorage.com | Falkenstein + | nbg1.your-objectstorage.com | Nuremberg + + --location-constraint + Location constraint - must be set to match the Region. + + Leave blank if not sure. Used when creating buckets only. + + --acl + Canned ACL used when creating buckets and storing or copying objects. + + This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when server-side copying objects as S3 + doesn't copy the ACL from the source but rather writes a fresh one. + + If the acl is an empty string then no X-Amz-Acl: header is added and + the default (private) will be used. + + + --bucket-acl + Canned ACL used when creating buckets. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. + | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. 
+ + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. 
+ + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. 
+ + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --use-arn-region + If true, enables arn region support for the service. + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. 
If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If an source object of unknown length is uploaded then rclone **will** do a + HEAD request. + + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. 
+ + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. 
+ + + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-options) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). 
+ + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. 
Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] + --endpoint value Endpoint for S3 API. 
[$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --location-constraint value Location constraint - must be set to match the Region. [$LOCATION_CONSTRAINT] + --region value Region to connect to. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. 
(default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. 
(default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. 
key=value) + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) + + General + + --name value Name of the storage (default: Auto generated) + --path value Path of the storage + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/create/s3/huaweiobs.md b/docs/en/cli-reference/storage/create/s3/huaweiobs.md index fdc201d47..e775d89da 100644 --- a/docs/en/cli-reference/storage/create/s3/huaweiobs.md +++ b/docs/en/cli-reference/storage/create/s3/huaweiobs.md @@ -29,8 +29,9 @@ DESCRIPTION: Leave blank for anonymous access or runtime credentials. --region - Region to connect to. - the location where your bucket will be created and your data stored. Need bo be same with your endpoint. + Region to connect to. + Leave blank if you are using an S3 clone and you don't have a region. 
Examples: | af-south-1 | AF-Johannesburg @@ -50,7 +51,9 @@ DESCRIPTION: | ru-northwest-2 | RU-Moscow2 --endpoint - Endpoint for OBS API. + Endpoint for S3 API. + + Required when using an S3 clone. Examples: | obs.af-south-1.myhuaweicloud.com | AF-Johannesburg @@ -197,6 +200,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -236,6 +259,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -376,6 +402,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -388,7 +419,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -490,6 +521,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. 
+ + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -516,10 +571,10 @@ DESCRIPTION: OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] - --endpoint value Endpoint for OBS API. [$ENDPOINT] + --endpoint value Endpoint for S3 API. [$ENDPOINT] --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] --help, -h show help - --region value Region to connect to. - the location where your bucket will be created and your data stored. Need bo be same with your endpoint. [$REGION] + --region value Region to connect to. [$REGION] --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] Advanced @@ -547,18 +602,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. 
[$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. 
(default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/create/s3/ibmcos.md b/docs/en/cli-reference/storage/create/s3/ibmcos.md index 0c2d08d49..5be46fc0a 100644 --- a/docs/en/cli-reference/storage/create/s3/ibmcos.md +++ b/docs/en/cli-reference/storage/create/s3/ibmcos.md @@ -33,16 +33,10 @@ DESCRIPTION: Leave blank if you are using an S3 clone and you don't have a region. - Examples: - | | Use this if unsure. - | | Will use v4 signatures and an empty region. - | other-v2-signature | Use this only if v4 signatures don't work. - | | E.g. pre Jewel/v10 CEPH. - --endpoint - Endpoint for IBM COS S3 API. + Endpoint for S3 API. - Specify if using an IBM COS On Premise. + Required when using an S3 clone. Examples: | s3.us.cloud-object-storage.appdomain.cloud | US Cross Region Endpoint @@ -71,11 +65,11 @@ DESCRIPTION: | s3.private.eu-de.cloud-object-storage.appdomain.cloud | EU Region DE Private Endpoint | s3.ap.cloud-object-storage.appdomain.cloud | APAC Cross Regional Endpoint | s3.tok.ap.cloud-object-storage.appdomain.cloud | APAC Cross Regional Tokyo Endpoint - | s3.hkg.ap.cloud-object-storage.appdomain.cloud | APAC Cross Regional HongKong Endpoint + | s3.hkg.ap.cloud-object-storage.appdomain.cloud | APAC Cross Regional Hong Kong Endpoint | s3.seo.ap.cloud-object-storage.appdomain.cloud | APAC Cross Regional Seoul Endpoint | s3.private.ap.cloud-object-storage.appdomain.cloud | APAC Cross Regional Private Endpoint | s3.private.tok.ap.cloud-object-storage.appdomain.cloud | APAC Cross Regional Tokyo Private Endpoint - | s3.private.hkg.ap.cloud-object-storage.appdomain.cloud | APAC Cross Regional HongKong Private Endpoint + | s3.private.hkg.ap.cloud-object-storage.appdomain.cloud | APAC Cross Regional Hong Kong Private Endpoint | 
s3.private.seo.ap.cloud-object-storage.appdomain.cloud | APAC Cross Regional Seoul Private Endpoint | s3.jp-tok.cloud-object-storage.appdomain.cloud | APAC Region Japan Endpoint | s3.private.jp-tok.cloud-object-storage.appdomain.cloud | APAC Region Japan Private Endpoint @@ -109,9 +103,9 @@ DESCRIPTION: | s3.private.sng01.cloud-object-storage.appdomain.cloud | Singapore Single Site Private Endpoint --location-constraint - Location constraint - must match endpoint when using IBM Cloud Public. + Location constraint - must be set to match the Region. - For on-prem COS, do not make a selection from this list, hit enter. + Leave blank if not sure. Used when creating buckets only. Examples: | us-standard | US Cross Region Standard @@ -290,6 +284,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -329,6 +343,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -469,6 +486,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. 
@@ -481,7 +503,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -583,6 +605,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -602,19 +648,27 @@ DESCRIPTION: use `-vv` to see the debug level logs. + --ibm-api-key + IBM API Key to be used to obtain IAM token + + --ibm-resource-instance-id + IBM service instance id + --description Description of the remote. OPTIONS: - --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] - --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] - --endpoint value Endpoint for IBM COS S3 API. [$ENDPOINT] - --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] - --help, -h show help - --location-constraint value Location constraint - must match endpoint when using IBM Cloud Public. [$LOCATION_CONSTRAINT] - --region value Region to connect to. [$REGION] - --secret-access-key value AWS Secret Access Key (password). 
[$SECRET_ACCESS_KEY] + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --ibm-api-key value IBM API Key to be used to obtain IAM token [$IBM_API_KEY] + --ibm-resource-instance-id value IBM service instance id [$IBM_RESOURCE_INSTANCE_ID] + --location-constraint value Location constraint - must be set to match the Region. [$LOCATION_CONSTRAINT] + --region value Region to connect to. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] Advanced @@ -641,18 +695,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. 
(default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/create/s3/idrive.md b/docs/en/cli-reference/storage/create/s3/idrive.md index e13e3639c..aafc31dc4 100644 --- a/docs/en/cli-reference/storage/create/s3/idrive.md +++ b/docs/en/cli-reference/storage/create/s3/idrive.md @@ -156,6 +156,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. 
+ + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -195,6 +215,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -335,6 +358,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -347,7 +375,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -449,6 +477,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. 
+ + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -504,18 +556,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. 
(default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/create/s3/intercolo.md b/docs/en/cli-reference/storage/create/s3/intercolo.md new file mode 100644 index 000000000..a03bb71a0 --- /dev/null +++ b/docs/en/cli-reference/storage/create/s3/intercolo.md @@ -0,0 +1,632 @@ +# Intercolo Object Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage create s3 intercolo - Intercolo Object Storage + +USAGE: + singularity storage create s3 intercolo [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). 
+ + Leave blank for anonymous access or runtime credentials. + + --region + Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. + + Examples: + | de-fra | Frankfurt, Germany + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. + + Examples: + | de-fra.i3storage.com | Frankfurt, Germany + + --acl + Canned ACL used when creating buckets and storing or copying objects. + + This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when server-side copying objects as S3 + doesn't copy the ACL from the source but rather writes a fresh one. + + If the acl is an empty string then no X-Amz-Acl: header is added and + the default (private) will be used. + + + --bucket-acl + Canned ACL used when creating buckets. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. + | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. 
+ + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. 
+ + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. 
+ + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --use-arn-region + If true, enables arn region support for the service. + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. 
If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If a source object of unknown length is uploaded then rclone **will** do a + HEAD request. + + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. 
+ + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. 
+ + + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-options) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). 
+ + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. 
Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] + --endpoint value Endpoint for S3 API. 
[$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --region value Region to connect to. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. 
(no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. 
(default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. 
key=value) + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) + + General + + --name value Name of the storage (default: Auto generated) + --path value Path of the storage + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/create/s3/ionos.md b/docs/en/cli-reference/storage/create/s3/ionos.md index ee3eef61d..ae60e92fb 100644 --- a/docs/en/cli-reference/storage/create/s3/ionos.md +++ b/docs/en/cli-reference/storage/create/s3/ionos.md @@ -29,18 +29,18 @@ DESCRIPTION: Leave blank for anonymous access or runtime credentials. --region - Region where your bucket will be created and your data stored. + Region to connect to. + Leave blank if you are using an S3 clone and you don't have a region. Examples: - | de | Frankfurt, Germany | eu-central-2 | Berlin, Germany | eu-south-2 | Logrono, Spain --endpoint - Endpoint for IONOS S3 Object Storage. 
+ Endpoint for S3 API. - Specify the endpoint from the same region. + Required when using an S3 clone. Examples: | s3-eu-central-1.ionoscloud.com | Frankfurt, Germany @@ -175,6 +175,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -214,6 +234,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -354,6 +377,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -366,7 +394,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -468,6 +496,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -494,10 +546,10 @@ DESCRIPTION: OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] - --endpoint value Endpoint for IONOS S3 Object Storage. [$ENDPOINT] + --endpoint value Endpoint for S3 API. [$ENDPOINT] --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] --help, -h show help - --region value Region where your bucket will be created and your data stored. [$REGION] + --region value Region to connect to. [$REGION] --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] Advanced @@ -525,18 +577,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. 
[$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. 
(default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/create/s3/leviia.md b/docs/en/cli-reference/storage/create/s3/leviia.md index 6dd29ed20..d36eba05e 100644 --- a/docs/en/cli-reference/storage/create/s3/leviia.md +++ b/docs/en/cli-reference/storage/create/s3/leviia.md @@ -33,17 +33,15 @@ DESCRIPTION: Leave blank if you are using an S3 clone and you don't have a region. - Examples: - | | Use this if unsure. - | | Will use v4 signatures and an empty region. - | other-v2-signature | Use this only if v4 signatures don't work. - | | E.g. pre Jewel/v10 CEPH. - --endpoint Endpoint for S3 API. Required when using an S3 clone. + Examples: + | s3.leviia.com | The default endpoint + | | Leviia + --acl Canned ACL used when creating buckets and storing or copying objects. @@ -172,6 +170,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -211,6 +229,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -351,6 +372,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. 
+ + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -363,7 +389,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -465,6 +491,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -522,18 +572,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. 
[$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. 
(default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/create/s3/liara.md b/docs/en/cli-reference/storage/create/s3/liara.md index e814d6421..544dc4316 100644 --- a/docs/en/cli-reference/storage/create/s3/liara.md +++ b/docs/en/cli-reference/storage/create/s3/liara.md @@ -29,7 +29,9 @@ DESCRIPTION: Leave blank for anonymous access or runtime credentials. --endpoint - Endpoint for Liara Object Storage API. + Endpoint for S3 API. + + Required when using an S3 clone. Examples: | storage.iran.liara.space | The default endpoint @@ -73,10 +75,7 @@ DESCRIPTION: | | The AuthenticatedUsers group gets READ access. --storage-class - The storage class to use when storing new objects in Liara - - Examples: - | STANDARD | Standard storage class + The storage class to use when storing new objects in S3. --upload-cutoff Cutoff for switching to chunked upload. @@ -169,6 +168,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -208,6 +227,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -348,6 +370,11 @@ DESCRIPTION: circumstances or for testing. 
+ --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -360,7 +387,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -462,6 +489,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -488,11 +539,11 @@ DESCRIPTION: OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] - --endpoint value Endpoint for Liara Object Storage API. [$ENDPOINT] + --endpoint value Endpoint for S3 API. [$ENDPOINT] --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] --help, -h show help --secret-access-key value AWS Secret Access Key (password). 
[$SECRET_ACCESS_KEY] - --storage-class value The storage class to use when storing new objects in Liara [$STORAGE_CLASS] + --storage-class value The storage class to use when storing new objects in S3. [$STORAGE_CLASS] Advanced @@ -519,18 +570,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. 
(default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/create/s3/linode.md b/docs/en/cli-reference/storage/create/s3/linode.md index 7ac987071..ed3c861ca 100644 --- a/docs/en/cli-reference/storage/create/s3/linode.md +++ b/docs/en/cli-reference/storage/create/s3/linode.md @@ -29,19 +29,32 @@ DESCRIPTION: Leave blank for anonymous access or runtime credentials. --endpoint - Endpoint for Linode Object Storage API. + Endpoint for S3 API. + + Required when using an S3 clone. 
Examples: - | us-southeast-1.linodeobjects.com | Atlanta, GA (USA), us-southeast-1 - | us-ord-1.linodeobjects.com | Chicago, IL (USA), us-ord-1 - | eu-central-1.linodeobjects.com | Frankfurt (Germany), eu-central-1 - | it-mil-1.linodeobjects.com | Milan (Italy), it-mil-1 - | us-east-1.linodeobjects.com | Newark, NJ (USA), us-east-1 - | fr-par-1.linodeobjects.com | Paris (France), fr-par-1 - | us-sea-1.linodeobjects.com | Seattle, WA (USA), us-sea-1 - | ap-south-1.linodeobjects.com | Singapore ap-south-1 - | se-sto-1.linodeobjects.com | Stockholm (Sweden), se-sto-1 - | us-iad-1.linodeobjects.com | Washington, DC, (USA), us-iad-1 + | nl-ams-1.linodeobjects.com | Amsterdam, NL (nl-ams-1) + | us-southeast-1.linodeobjects.com | Atlanta, GA, US (us-southeast-1) + | in-maa-1.linodeobjects.com | Chennai, IN (in-maa-1) + | us-ord-1.linodeobjects.com | Chicago, IL, US (us-ord-1) + | eu-central-1.linodeobjects.com | Frankfurt, DE (eu-central-1) + | id-cgk-1.linodeobjects.com | Jakarta, ID (id-cgk-1) + | gb-lon-1.linodeobjects.com | London 2, UK (gb-lon-1) + | us-lax-1.linodeobjects.com | Los Angeles, CA, US (us-lax-1) + | es-mad-1.linodeobjects.com | Madrid, ES (es-mad-1) + | us-mia-1.linodeobjects.com | Miami, FL, US (us-mia-1) + | it-mil-1.linodeobjects.com | Milan, IT (it-mil-1) + | us-east-1.linodeobjects.com | Newark, NJ, US (us-east-1) + | jp-osa-1.linodeobjects.com | Osaka, JP (jp-osa-1) + | fr-par-1.linodeobjects.com | Paris, FR (fr-par-1) + | br-gru-1.linodeobjects.com | Sao Paulo, BR (br-gru-1) + | us-sea-1.linodeobjects.com | Seattle, WA, US (us-sea-1) + | ap-south-1.linodeobjects.com | Singapore, SG (ap-south-1) + | sg-sin-1.linodeobjects.com | Singapore 2, SG (sg-sin-1) + | se-sto-1.linodeobjects.com | Stockholm, SE (se-sto-1) + | jp-tyo-1.linodeobjects.com | Tokyo 3, JP (jp-tyo-1) + | us-iad-10.linodeobjects.com | Washington, DC, US (us-iad-10) --acl Canned ACL used when creating buckets and storing or copying objects. 
@@ -171,6 +184,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -210,6 +243,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -350,6 +386,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -362,7 +403,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -464,6 +505,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. 
+ + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -490,7 +555,7 @@ DESCRIPTION: OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] - --endpoint value Endpoint for Linode Object Storage API. [$ENDPOINT] + --endpoint value Endpoint for S3 API. [$ENDPOINT] --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] --help, -h show help --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] @@ -520,18 +585,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. 
(default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. 
(default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/create/s3/lyvecloud.md b/docs/en/cli-reference/storage/create/s3/lyvecloud.md index c2f8799c1..9b2c5bc91 100644 --- a/docs/en/cli-reference/storage/create/s3/lyvecloud.md +++ b/docs/en/cli-reference/storage/create/s3/lyvecloud.md @@ -33,21 +33,14 @@ DESCRIPTION: Leave blank if you are using an S3 clone and you don't have a region. - Examples: - | | Use this if unsure. - | | Will use v4 signatures and an empty region. - | other-v2-signature | Use this only if v4 signatures don't work. - | | E.g. pre Jewel/v10 CEPH. - --endpoint Endpoint for S3 API. Required when using an S3 clone. Examples: - | s3.us-east-1.lyvecloud.seagate.com | Seagate Lyve Cloud US East 1 (Virginia) - | s3.us-west-1.lyvecloud.seagate.com | Seagate Lyve Cloud US West 1 (California) - | s3.ap-southeast-1.lyvecloud.seagate.com | Seagate Lyve Cloud AP Southeast 1 (Singapore) + | s3.us-west-1.{account_name}.lyve.seagate.com | US West 1 - California + | s3.eu-west-1.{account_name}.lyve.seagate.com | EU West 1 - Ireland --location-constraint Location constraint - must be set to match the Region. @@ -182,6 +175,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -221,6 +234,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. 
+ --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -361,6 +377,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -373,7 +394,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -475,6 +496,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -533,18 +578,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. 
[$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. 
(default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/create/s3/magalu.md b/docs/en/cli-reference/storage/create/s3/magalu.md index a0ba72d02..2436c871c 100644 --- a/docs/en/cli-reference/storage/create/s3/magalu.md +++ b/docs/en/cli-reference/storage/create/s3/magalu.md @@ -34,8 +34,8 @@ DESCRIPTION: Required when using an S3 clone. Examples: - | br-se1.magaluobjects.com | Magalu BR Southeast 1 endpoint - | br-ne1.magaluobjects.com | Magalu BR Northeast 1 endpoint + | br-se1.magaluobjects.com | São Paulo, SP (BR), br-se1 + | br-ne1.magaluobjects.com | Fortaleza, CE (BR), br-ne1 --acl Canned ACL used when creating buckets and storing or copying objects. @@ -75,10 +75,7 @@ DESCRIPTION: | | The AuthenticatedUsers group gets READ access. --storage-class - The storage class to use when storing new objects in Magalu. - - Examples: - | STANDARD | Standard storage class + The storage class to use when storing new objects in S3. --upload-cutoff Cutoff for switching to chunked upload. @@ -171,6 +168,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. 
@@ -210,6 +227,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -350,6 +370,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -362,7 +387,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -464,6 +489,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -494,7 +543,7 @@ OPTIONS: --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). 
(default: false) [$ENV_AUTH] --help, -h show help --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] - --storage-class value The storage class to use when storing new objects in Magalu. [$STORAGE_CLASS] + --storage-class value The storage class to use when storing new objects in S3. [$STORAGE_CLASS] Advanced @@ -521,18 +570,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. 
(default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/create/s3/mega.md b/docs/en/cli-reference/storage/create/s3/mega.md new file mode 100644 index 000000000..d88c9d876 --- /dev/null +++ b/docs/en/cli-reference/storage/create/s3/mega.md @@ -0,0 +1,611 @@ +# MEGA S4 Object Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage create s3 mega - MEGA S4 Object Storage + +USAGE: + singularity storage create s3 mega [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. 
+ + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. + + Examples: + | s3.eu-central-1.s4.mega.io | Mega S4 eu-central-1 (Amsterdam) + | s3.eu-central-2.s4.mega.io | Mega S4 eu-central-2 (Bettembourg) + | s3.ca-central-1.s4.mega.io | Mega S4 ca-central-1 (Montreal) + | s3.ca-west-1.s4.mega.io | Mega S4 ca-west-1 (Vancouver) + + --bucket-acl + Canned ACL used when creating buckets. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. + | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. 
+ + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. 
+ + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. 
AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --use-arn-region + If true, enables arn region support for the service. + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. 
If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If a source object of unknown length is uploaded then rclone **will** do a + HEAD request. + + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. 
+ + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + + --versions + Include old versions in directory listings. 
+ + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-options) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. 
+ + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. 
(default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. 
(default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. 
(default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. 
key=value) + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) + + General + + --name value Name of the storage (default: Auto generated) + --path value Path of the storage + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/create/s3/minio.md b/docs/en/cli-reference/storage/create/s3/minio.md index c09affbd8..2b6df457d 100644 --- a/docs/en/cli-reference/storage/create/s3/minio.md +++ b/docs/en/cli-reference/storage/create/s3/minio.md @@ -33,12 +33,6 @@ DESCRIPTION: Leave blank if you are using an S3 clone and you don't have a region. - Examples: - | | Use this if unsure. - | | Will use v4 signatures and an empty region. - | other-v2-signature | Use this only if v4 signatures don't work. - | | E.g. pre Jewel/v10 CEPH. - --endpoint Endpoint for S3 API. 
@@ -89,10 +83,6 @@ DESCRIPTION: --server-side-encryption The server-side encryption algorithm used when storing this object in S3. - Examples: - | | None - | AES256 | AES256 - --sse-customer-algorithm If using SSE-C, the server-side encryption algorithm used when storing this object in S3. @@ -223,6 +213,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -262,6 +272,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -402,6 +415,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -414,7 +432,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -516,6 +534,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. 
+ + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -576,9 +618,14 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --sse-customer-algorithm value If using SSE-C, the server-side encryption algorithm used when storing this object in S3. [$SSE_CUSTOMER_ALGORITHM] --sse-customer-key value To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. 
[$SSE_CUSTOMER_KEY] --sse-customer-key-base64 value If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY_BASE64] @@ -587,11 +634,14 @@ OPTIONS: --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. 
(default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/create/s3/netease.md b/docs/en/cli-reference/storage/create/s3/netease.md index a8ccaf880..bf90d8dd4 100644 --- a/docs/en/cli-reference/storage/create/s3/netease.md +++ b/docs/en/cli-reference/storage/create/s3/netease.md @@ -33,12 +33,6 @@ DESCRIPTION: Leave blank if you are using an S3 clone and you don't have a region. - Examples: - | | Use this if unsure. - | | Will use v4 signatures and an empty region. - | other-v2-signature | Use this only if v4 signatures don't work. - | | E.g. pre Jewel/v10 CEPH. - --endpoint Endpoint for S3 API. @@ -177,6 +171,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -216,6 +230,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -356,6 +373,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -368,7 +390,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. 
- See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -470,6 +492,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -528,18 +574,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. 
(default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. 
(default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/create/s3/other.md b/docs/en/cli-reference/storage/create/s3/other.md index 00462e42c..98f532c07 100644 --- a/docs/en/cli-reference/storage/create/s3/other.md +++ b/docs/en/cli-reference/storage/create/s3/other.md @@ -33,12 +33,6 @@ DESCRIPTION: Leave blank if you are using an S3 clone and you don't have a region. - Examples: - | | Use this if unsure. - | | Will use v4 signatures and an empty region. - | other-v2-signature | Use this only if v4 signatures don't work. - | | E.g. pre Jewel/v10 CEPH. - --endpoint Endpoint for S3 API. @@ -177,6 +171,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -216,6 +230,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -356,6 +373,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -368,7 +390,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. 
- See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -470,6 +492,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -528,18 +574,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. 
(default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. 
(default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/create/s3/outscale.md b/docs/en/cli-reference/storage/create/s3/outscale.md new file mode 100644 index 000000000..30044181c --- /dev/null +++ b/docs/en/cli-reference/storage/create/s3/outscale.md @@ -0,0 +1,640 @@ +# OUTSCALE Object Storage (OOS) + +{% code fullWidth="true" %} +``` +NAME: + singularity storage create s3 outscale - OUTSCALE Object Storage (OOS) + +USAGE: + singularity storage create s3 outscale [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --region + Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. + + Examples: + | eu-west-2 | Paris, France + | us-east-2 | New Jersey, USA + | us-west-1 | California, USA + | cloudgouv-eu-west-1 | SecNumCloud, Paris, France + | ap-northeast-1 | Tokyo, Japan + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. + + Examples: + | oos.eu-west-2.outscale.com | Outscale EU West 2 (Paris) + | oos.us-east-2.outscale.com | Outscale US east 2 (New Jersey) + | oos.us-west-1.outscale.com | Outscale US West 1 (California) + | oos.cloudgouv-eu-west-1.outscale.com | Outscale SecNumCloud (Paris) + | oos.ap-northeast-1.outscale.com | Outscale AP Northeast 1 (Japan) + + --acl + Canned ACL used when creating buckets and storing or copying objects. + + This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too.
+ + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when server-side copying objects as S3 + doesn't copy the ACL from the source but rather writes a fresh one. + + If the acl is an empty string then no X-Amz-Acl: header is added and + the default (private) will be used. + + + --bucket-acl + Canned ACL used when creating buckets. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. + | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers.
+ + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. 
If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. 
+ + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --use-arn-region + If true, enables arn region support for the service. + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. 
+ + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If a source object of unknown length is uploaded then rclone **will** do a + HEAD request. + + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed.
+ + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". 
+ + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-options) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. 
+ + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. 
+ + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --region value Region to connect to. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. 
[$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. 
[$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. 
(default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. 
key=value) + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) + + General + + --name value Name of the storage (default: Auto generated) + --path value Path of the storage + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/create/s3/ovhcloud.md b/docs/en/cli-reference/storage/create/s3/ovhcloud.md new file mode 100644 index 000000000..1b8d75ace --- /dev/null +++ b/docs/en/cli-reference/storage/create/s3/ovhcloud.md @@ -0,0 +1,659 @@ +# OVHcloud Object Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage create s3 ovhcloud - OVHcloud Object Storage + +USAGE: + singularity storage create s3 ovhcloud [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. 
+ + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --region + Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. + + Examples: + | gra | Gravelines, France + | rbx | Roubaix, France + | sbg | Strasbourg, France + | eu-west-par | Paris, France (3AZ) + | uk | London, United Kingdom + | waw | Warsaw, Poland + | bhs | Beauharnois, Canada + | ca-east-tor | Toronto, Canada + | sgp | Singapore + | ap-southeast-syd | Sydney, Australia + | ap-south-mum | Mumbai, India + | us-east-va | Vint Hill, Virginia, USA + | us-west-or | Hillsboro, Oregon, USA + | rbx-archive | Roubaix, France (Cold Archive) + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. 
+ + Examples: + | s3.gra.io.cloud.ovh.net | OVHcloud Gravelines, France + | s3.rbx.io.cloud.ovh.net | OVHcloud Roubaix, France + | s3.sbg.io.cloud.ovh.net | OVHcloud Strasbourg, France + | s3.eu-west-par.io.cloud.ovh.net | OVHcloud Paris, France (3AZ) + | s3.de.io.cloud.ovh.net | OVHcloud Frankfurt, Germany + | s3.uk.io.cloud.ovh.net | OVHcloud London, United Kingdom + | s3.waw.io.cloud.ovh.net | OVHcloud Warsaw, Poland + | s3.bhs.io.cloud.ovh.net | OVHcloud Beauharnois, Canada + | s3.ca-east-tor.io.cloud.ovh.net | OVHcloud Toronto, Canada + | s3.sgp.io.cloud.ovh.net | OVHcloud Singapore + | s3.ap-southeast-syd.io.cloud.ovh.net | OVHcloud Sydney, Australia + | s3.ap-south-mum.io.cloud.ovh.net | OVHcloud Mumbai, India + | s3.us-east-va.io.cloud.ovh.us | OVHcloud Vint Hill, Virginia, USA + | s3.us-west-or.io.cloud.ovh.us | OVHcloud Hillsboro, Oregon, USA + | s3.rbx-archive.io.cloud.ovh.net | OVHcloud Roubaix, France (Cold Archive) + + --acl + Canned ACL used when creating buckets and storing or copying objects. + + This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when server-side copying objects as S3 + doesn't copy the ACL from the source but rather writes a fresh one. + + If the acl is an empty string then no X-Amz-Acl: header is added and + the default (private) will be used. + + + --bucket-acl + Canned ACL used when creating buckets. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). 
+ | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. + | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. 
+ + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. 
+ + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --use-arn-region + If true, enables arn region support for the service. + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. 
+ In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. 
+ + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If an source object of unknown length is uploaded then rclone **will** do a + HEAD request. + + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. 
+ + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-options) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. 
+ + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. 
+ + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --region value Region to connect to. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. 
(default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. 
[$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. 
(default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value) + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) + + General + + --name value Name of the storage (default: Auto generated) + --path value Path of the storage + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/create/s3/petabox.md 
b/docs/en/cli-reference/storage/create/s3/petabox.md index 1fc88e0c9..467bd89fe 100644 --- a/docs/en/cli-reference/storage/create/s3/petabox.md +++ b/docs/en/cli-reference/storage/create/s3/petabox.md @@ -29,20 +29,20 @@ DESCRIPTION: Leave blank for anonymous access or runtime credentials. --region - Region where your bucket will be created and your data stored. + Region to connect to. + Leave blank if you are using an S3 clone and you don't have a region. Examples: - | us-east-1 | US East (N. Virginia) | eu-central-1 | Europe (Frankfurt) | ap-southeast-1 | Asia Pacific (Singapore) | me-south-1 | Middle East (Bahrain) | sa-east-1 | South America (São Paulo) --endpoint - Endpoint for Petabox S3 Object Storage. + Endpoint for S3 API. - Specify the endpoint from the same region. + Required when using an S3 clone. Examples: | s3.petabox.io | US East (N. Virginia) @@ -180,6 +180,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -219,6 +239,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -359,6 +382,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. 
+ + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -371,7 +399,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -473,6 +501,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -499,10 +551,10 @@ DESCRIPTION: OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] - --endpoint value Endpoint for Petabox S3 Object Storage. [$ENDPOINT] + --endpoint value Endpoint for S3 API. [$ENDPOINT] --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] --help, -h show help - --region value Region where your bucket will be created and your data stored. [$REGION] + --region value Region to connect to. [$REGION] --secret-access-key value AWS Secret Access Key (password). 
[$SECRET_ACCESS_KEY] Advanced @@ -530,18 +582,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). 
(default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/create/s3/qiniu.md b/docs/en/cli-reference/storage/create/s3/qiniu.md index f8b38875c..1d50fb3ff 100644 --- a/docs/en/cli-reference/storage/create/s3/qiniu.md +++ b/docs/en/cli-reference/storage/create/s3/qiniu.md @@ -30,6 +30,8 @@ DESCRIPTION: --region Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. Examples: | cn-east-1 | The default endpoint - a good choice if you are unsure. @@ -49,7 +51,9 @@ DESCRIPTION: | | Needs location constraint ap-northeast-1. --endpoint - Endpoint for Qiniu Object Storage. + Endpoint for S3 API. + + Required when using an S3 clone. Examples: | s3-cn-east-1.qiniucs.com | East China Endpoint 1 @@ -63,7 +67,7 @@ DESCRIPTION: --location-constraint Location constraint - must be set to match the Region. - Used when creating buckets only. + Leave blank if not sure. Used when creating buckets only. Examples: | cn-east-1 | East China Region 1 @@ -112,12 +116,10 @@ DESCRIPTION: | | The AuthenticatedUsers group gets READ access. 
--storage-class - The storage class to use when storing new objects in Qiniu. + The storage class to use when storing new objects in S3. Examples: - | STANDARD | Standard storage class | LINE | Infrequent access storage mode - | GLACIER | Archive storage mode | DEEP_ARCHIVE | Deep archive storage mode --upload-cutoff @@ -211,6 +213,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -250,6 +272,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -390,6 +415,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -402,7 +432,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -504,6 +534,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. 
+ + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -530,13 +584,13 @@ DESCRIPTION: OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] - --endpoint value Endpoint for Qiniu Object Storage. [$ENDPOINT] + --endpoint value Endpoint for S3 API. [$ENDPOINT] --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] --help, -h show help --location-constraint value Location constraint - must be set to match the Region. [$LOCATION_CONSTRAINT] --region value Region to connect to. [$REGION] --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] - --storage-class value The storage class to use when storing new objects in Qiniu. [$STORAGE_CLASS] + --storage-class value The storage class to use when storing new objects in S3. [$STORAGE_CLASS] Advanced @@ -563,18 +617,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. 
[$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. 
(default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/create/s3/rabata.md b/docs/en/cli-reference/storage/create/s3/rabata.md new file mode 100644 index 000000000..5efca52c5 --- /dev/null +++ b/docs/en/cli-reference/storage/create/s3/rabata.md @@ -0,0 +1,607 @@ +# Rabata Cloud Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage create s3 rabata - Rabata Cloud Storage + +USAGE: + singularity storage create s3 rabata [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --region + Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. + + Examples: + | eu-west-1 | EU (Ireland) + | eu-west-2 | EU (London) + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. + + Examples: + | s3.us-east-1.rabata.io | US East (N. 
Virginia) + | s3.eu-west-1.rabata.io | EU West (Ireland) + | s3.eu-west-2.rabata.io | EU West (London) + + --location-constraint + Location constraint - must be set to match the Region. + + Leave blank if not sure. Used when creating buckets only. + + Examples: + | us-east-1 | US East (N. Virginia) + | eu-west-1 | EU (Ireland) + | eu-west-2 | EU (London) + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. 
+ + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. 
+ + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --use-arn-region + If true, enables arn region support for the service. + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. 
+ In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. 
+ + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If a source object of unknown length is uploaded then rclone **will** do a + HEAD request. + + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder.
+ + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-options) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. 
+ + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. 
+ + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --location-constraint value Location constraint - must be set to match the Region. [$LOCATION_CONSTRAINT] + --region value Region to connect to. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. 
(default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. 
[$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. 
(default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value) + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) + + General + + --name value Name of the storage (default: Auto generated) + --path value Path of the storage + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/create/s3/rackcorp.md 
b/docs/en/cli-reference/storage/create/s3/rackcorp.md index cf48c86d5..689951a41 100644 --- a/docs/en/cli-reference/storage/create/s3/rackcorp.md +++ b/docs/en/cli-reference/storage/create/s3/rackcorp.md @@ -29,8 +29,9 @@ DESCRIPTION: Leave blank for anonymous access or runtime credentials. --region - region - the location where your bucket will be created and your data stored. + Region to connect to. + Leave blank if you are using an S3 clone and you don't have a region. Examples: | global | Global CDN (All locations) Region @@ -54,7 +55,9 @@ DESCRIPTION: | nz | Auckland (New Zealand) Region --endpoint - Endpoint for RackCorp Object Storage. + Endpoint for S3 API. + + Required when using an S3 clone. Examples: | s3.rackcorp.com | Global (AnyCast) Endpoint @@ -78,8 +81,9 @@ DESCRIPTION: | nz.s3.rackcorp.com | Auckland (New Zealand) Endpoint --location-constraint - Location constraint - the location where your bucket will be located and your data stored. + Location constraint - must be set to match the Region. + Leave blank if not sure. Used when creating buckets only. Examples: | global | Global CDN Region @@ -99,7 +103,7 @@ DESCRIPTION: | de | Frankfurt (Germany) Region | us | USA (AnyCast) Region | us-east-1 | New York (USA) Region - | us-west-1 | Freemont (USA) Region + | us-west-1 | Fremont (USA) Region | nz | Auckland (New Zealand) Region --acl @@ -230,6 +234,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. 
@@ -269,6 +293,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -409,6 +436,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -421,7 +453,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -523,6 +555,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -549,11 +605,11 @@ DESCRIPTION: OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] --acl value Canned ACL used when creating buckets and storing or copying objects. 
[$ACL] - --endpoint value Endpoint for RackCorp Object Storage. [$ENDPOINT] + --endpoint value Endpoint for S3 API. [$ENDPOINT] --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] --help, -h show help - --location-constraint value Location constraint - the location where your bucket will be located and your data stored. [$LOCATION_CONSTRAINT] - --region value region - the location where your bucket will be created and your data stored. [$REGION] + --location-constraint value Location constraint - must be set to match the Region. [$LOCATION_CONSTRAINT] + --region value Region to connect to. [$REGION] --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] Advanced @@ -581,18 +637,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. 
(default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/create/s3/rclone.md b/docs/en/cli-reference/storage/create/s3/rclone.md index bca8fbf59..bfb3e7a7b 100644 --- a/docs/en/cli-reference/storage/create/s3/rclone.md +++ b/docs/en/cli-reference/storage/create/s3/rclone.md @@ -28,64 +28,11 @@ DESCRIPTION: Leave blank for anonymous access or runtime credentials. - --region - Region to connect to. 
- - Leave blank if you are using an S3 clone and you don't have a region. - - Examples: - | | Use this if unsure. - | | Will use v4 signatures and an empty region. - | other-v2-signature | Use this only if v4 signatures don't work. - | | E.g. pre Jewel/v10 CEPH. - --endpoint Endpoint for S3 API. Required when using an S3 clone. - --location-constraint - Location constraint - must be set to match the Region. - - Leave blank if not sure. Used when creating buckets only. - - --acl - Canned ACL used when creating buckets and storing or copying objects. - - This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. - - For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl - - Note that this ACL is applied when server-side copying objects as S3 - doesn't copy the ACL from the source but rather writes a fresh one. - - If the acl is an empty string then no X-Amz-Acl: header is added and - the default (private) will be used. - - - --bucket-acl - Canned ACL used when creating buckets. - - For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl - - Note that this ACL is applied when only when creating buckets. If it - isn't set then "acl" is used instead. - - If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: - header is added and the default (private) will be used. - - - Examples: - | private | Owner gets FULL_CONTROL. - | | No one else has access rights (default). - | public-read | Owner gets FULL_CONTROL. - | | The AllUsers group gets READ access. - | public-read-write | Owner gets FULL_CONTROL. - | | The AllUsers group gets READ and WRITE access. - | | Granting this on a bucket is generally not recommended. - | authenticated-read | Owner gets FULL_CONTROL. - | | The AuthenticatedUsers group gets READ access. - --upload-cutoff Cutoff for switching to chunked upload. @@ -177,6 +124,26 @@ DESCRIPTION: --session-token An AWS session token. 
+ --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -216,6 +183,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -356,6 +326,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -368,7 +343,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -470,6 +445,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. 
+ + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -494,18 +493,14 @@ DESCRIPTION: OPTIONS: - --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] - --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] - --endpoint value Endpoint for S3 API. [$ENDPOINT] - --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] - --help, -h show help - --location-constraint value Location constraint - must be set to match the Region. [$LOCATION_CONSTRAINT] - --region value Region to connect to. [$REGION] - --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] @@ -528,18 +523,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. 
[$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. 
(default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/create/s3/scaleway.md b/docs/en/cli-reference/storage/create/s3/scaleway.md index 6d0dfc16f..f93b82cc2 100644 --- a/docs/en/cli-reference/storage/create/s3/scaleway.md +++ b/docs/en/cli-reference/storage/create/s3/scaleway.md @@ -30,6 +30,8 @@ DESCRIPTION: --region Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. Examples: | nl-ams | Amsterdam, The Netherlands @@ -37,7 +39,9 @@ DESCRIPTION: | pl-waw | Warsaw, Poland --endpoint - Endpoint for Scaleway Object Storage. + Endpoint for S3 API. + + Required when using an S3 clone. Examples: | s3.nl-ams.scw.cloud | Amsterdam Endpoint @@ -187,6 +191,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. 
@@ -226,6 +250,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -366,6 +393,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -378,7 +410,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -480,6 +512,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -506,7 +562,7 @@ DESCRIPTION: OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] --acl value Canned ACL used when creating buckets and storing or copying objects. 
[$ACL] - --endpoint value Endpoint for Scaleway Object Storage. [$ENDPOINT] + --endpoint value Endpoint for S3 API. [$ENDPOINT] --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] --help, -h show help --region value Region to connect to. [$REGION] @@ -538,18 +594,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. 
(default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/create/s3/seaweedfs.md b/docs/en/cli-reference/storage/create/s3/seaweedfs.md index 7358220e6..dac90050e 100644 --- a/docs/en/cli-reference/storage/create/s3/seaweedfs.md +++ b/docs/en/cli-reference/storage/create/s3/seaweedfs.md @@ -33,12 +33,6 @@ DESCRIPTION: Leave blank if you are using an S3 clone and you don't have a region. - Examples: - | | Use this if unsure. - | | Will use v4 signatures and an empty region. - | other-v2-signature | Use this only if v4 signatures don't work. - | | E.g. pre Jewel/v10 CEPH. - --endpoint Endpoint for S3 API. @@ -180,6 +174,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. 
+ + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -219,6 +233,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -359,6 +376,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -371,7 +393,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -473,6 +495,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -531,18 +577,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). 
(default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/create/s3/selectel.md b/docs/en/cli-reference/storage/create/s3/selectel.md new file mode 100644 index 000000000..19cb2ed05 --- /dev/null +++ b/docs/en/cli-reference/storage/create/s3/selectel.md @@ -0,0 +1,598 @@ +# Selectel Object Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage create s3 selectel - Selectel Object Storage + +USAGE: + singularity storage create s3 selectel [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --region + Region to connect to. 
+ + Leave blank if you are using an S3 clone and you don't have a region. + + Examples: + | ru-3 | St. Petersburg + | kz-1 | Kazakhstan + | uz-2 | Uzbekistan + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. + + Examples: + | s3.ru-1.storage.selcloud.ru | St. Petersburg + | s3.ru-3.storage.selcloud.ru | St. Petersburg + | s3.kz-1.storage.selcloud.ru | Kazakhstan + | s3.uz-2.storage.selcloud.ru | Uzbekistan + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. 
+ + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. 
+ + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --use-arn-region + If true, enables arn region support for the service. + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. 
+ In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. 
+ + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If a source object of unknown length is uploaded then rclone **will** do a + HEAD request. + + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder.
+ + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-options) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. 
+ + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. 
+ + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --region value Region to connect to. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. 
(default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. 
[$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. 
(default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value) + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) + + General + + --name value Name of the storage (default: Auto generated) + --path value Path of the storage + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/create/s3/servercore.md b/docs/en/cli-reference/storage/create/s3/servercore.md new file mode 100644 index 000000000..1f7b07963 --- /dev/null +++ 
b/docs/en/cli-reference/storage/create/s3/servercore.md @@ -0,0 +1,620 @@ +# Servercore Object Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage create s3 servercore - Servercore Object Storage + +USAGE: + singularity storage create s3 servercore [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --region + Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. + + Examples: + | uz-2 | Tashkent, Uzbekistan + | kz-1 | Almaty, Kazakhstan + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. + + Examples: + | s3.ru-1.storage.selcloud.ru | Saint Petersburg + | s3.uz-2.srvstorage.uz | Tashkent, Uzbekistan + | s3.kz-1.srvstorage.kz | Almaty, Kazakhstan + + --bucket-acl + Canned ACL used when creating buckets. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. 
+ | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. 
+ + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + + --upload-concurrency + Concurrency for multipart uploads and copies. 
+ + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --use-arn-region + If true, enables arn region support for the service. + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. 
+ + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If an source object of unknown length is uploaded then rclone **will** do a + HEAD request. 
+ + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. 
+ + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-options) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. 
+ + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. 
+ + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. 
You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --region value Region to connect to. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. 
(default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. 
(default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. 
key=value) + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) + + General + + --name value Name of the storage (default: Auto generated) + --path value Path of the storage + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/create/s3/spectralogic.md b/docs/en/cli-reference/storage/create/s3/spectralogic.md new file mode 100644 index 000000000..593f11e50 --- /dev/null +++ b/docs/en/cli-reference/storage/create/s3/spectralogic.md @@ -0,0 +1,581 @@ +# Spectra Logic Black Pearl + +{% code fullWidth="true" %} +``` +NAME: + singularity storage create s3 spectralogic - Spectra Logic Black Pearl + +USAGE: + singularity storage create s3 spectralogic [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). 
+ + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. 
+ A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. 
+ + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --use-arn-region + If true, enables arn region support for the service. + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). 
+ + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. 
+
+ In particular it will assume:
+
+ - the metadata, including modtime, storage class and content type was as uploaded
+ - the size was as uploaded
+
+ It reads the following items from the response for a single part PUT:
+
+ - the MD5SUM
+ - The uploaded date
+
+ For multipart uploads these items aren't read.
+
+ If a source object of unknown length is uploaded then rclone **will** do a
+ HEAD request.
+
+ Setting this flag increases the chance for undetected upload failures,
+ in particular an incorrect size, so it isn't recommended for normal
+ operation. In practice the chance of an undetected upload failure is
+ very small even with this flag.
+
+
+ --no-head-object
+ If set, do not do HEAD before GET when getting objects.
+
+ --encoding
+ The encoding for the backend.
+
+ See the [encoding section in the overview](/overview/#encoding) for more info.
+
+ --memory-pool-flush-time
+ How often internal memory buffer pools will be flushed. (no longer used)
+
+ --memory-pool-use-mmap
+ Whether to use mmap buffers in internal memory pool. (no longer used)
+
+ --disable-http2
+ Disable usage of http2 for S3 backends.
+
+ There is currently an unsolved issue with the s3 (specifically minio) backend
+ and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be
+ disabled here. When the issue is solved this flag will be removed.
+
+ See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631
+
+
+
+ --download-url
+ Custom endpoint for downloads.
+ This is usually set to a CloudFront CDN URL as AWS S3 offers
+ cheaper egress for data downloaded through the CloudFront network.
+
+ --directory-markers
+ Upload an empty object with a trailing slash when a new directory is created
+
+ Empty folders are unsupported for bucket based remotes, this option creates an empty
+ object ending with "/", to persist the folder.
+ + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-options) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. 
+ + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. 
+ + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. 
(default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. 
[$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. 
(default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value) + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) + + General + + --name value Name of the storage (default: Auto generated) + --path value Path of the storage + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/create/s3/stackpath.md b/docs/en/cli-reference/storage/create/s3/stackpath.md index bcb5ccd05..0c07f1a00 100644 --- 
a/docs/en/cli-reference/storage/create/s3/stackpath.md +++ b/docs/en/cli-reference/storage/create/s3/stackpath.md @@ -33,14 +33,10 @@ DESCRIPTION: Leave blank if you are using an S3 clone and you don't have a region. - Examples: - | | Use this if unsure. - | | Will use v4 signatures and an empty region. - | other-v2-signature | Use this only if v4 signatures don't work. - | | E.g. pre Jewel/v10 CEPH. - --endpoint - Endpoint for StackPath Object Storage. + Endpoint for S3 API. + + Required when using an S3 clone. Examples: | s3.us-east-2.stackpathstorage.com | US East Endpoint @@ -175,6 +171,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -214,6 +230,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -354,6 +373,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -366,7 +390,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. 
- See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -468,6 +492,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -494,7 +542,7 @@ DESCRIPTION: OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] - --endpoint value Endpoint for StackPath Object Storage. [$ENDPOINT] + --endpoint value Endpoint for S3 API. [$ENDPOINT] --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] --help, -h show help --region value Region to connect to. [$REGION] @@ -525,18 +573,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. 
[$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. 
(default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/create/s3/storj.md b/docs/en/cli-reference/storage/create/s3/storj.md index 689b4bd6b..e0340a135 100644 --- a/docs/en/cli-reference/storage/create/s3/storj.md +++ b/docs/en/cli-reference/storage/create/s3/storj.md @@ -29,33 +29,12 @@ DESCRIPTION: Leave blank for anonymous access or runtime credentials. --endpoint - Endpoint for Storj Gateway. - - Examples: - | gateway.storjshare.io | Global Hosted Gateway - - --bucket-acl - Canned ACL used when creating buckets. - - For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl - - Note that this ACL is applied when only when creating buckets. If it - isn't set then "acl" is used instead. - - If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: - header is added and the default (private) will be used. + Endpoint for S3 API. + Required when using an S3 clone. Examples: - | private | Owner gets FULL_CONTROL. - | | No one else has access rights (default). - | public-read | Owner gets FULL_CONTROL. - | | The AllUsers group gets READ access. - | public-read-write | Owner gets FULL_CONTROL. - | | The AllUsers group gets READ and WRITE access. - | | Granting this on a bucket is generally not recommended. - | authenticated-read | Owner gets FULL_CONTROL. - | | The AuthenticatedUsers group gets READ access. + | gateway.storjshare.io | Global Hosted Gateway --upload-cutoff Cutoff for switching to chunked upload. @@ -148,6 +127,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. 
+ + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -187,6 +186,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -327,6 +329,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -339,7 +346,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -441,6 +448,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -466,14 +497,13 @@ DESCRIPTION: OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] - --endpoint value Endpoint for Storj Gateway. [$ENDPOINT] + --endpoint value Endpoint for S3 API. [$ENDPOINT] --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] --help, -h show help --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] @@ -496,18 +526,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. 
(default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. 
(default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/create/s3/synology.md b/docs/en/cli-reference/storage/create/s3/synology.md index 358f2fa8d..da28b0490 100644 --- a/docs/en/cli-reference/storage/create/s3/synology.md +++ b/docs/en/cli-reference/storage/create/s3/synology.md @@ -29,8 +29,9 @@ DESCRIPTION: Leave blank for anonymous access or runtime credentials. --region - Region where your data stored. + Region to connect to. + Leave blank if you are using an S3 clone and you don't have a region. Examples: | eu-001 | Europe Region 1 @@ -40,7 +41,9 @@ DESCRIPTION: | tw-001 | Asia (Taiwan) --endpoint - Endpoint for Synology C2 Object Storage API. + Endpoint for S3 API. + + Required when using an S3 clone. Examples: | eu-001.s3.synologyc2.net | EU Endpoint 1 @@ -54,29 +57,6 @@ DESCRIPTION: Leave blank if not sure. Used when creating buckets only. - --bucket-acl - Canned ACL used when creating buckets. - - For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl - - Note that this ACL is applied when only when creating buckets. If it - isn't set then "acl" is used instead. - - If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: - header is added and the default (private) will be used. - - - Examples: - | private | Owner gets FULL_CONTROL. - | | No one else has access rights (default). - | public-read | Owner gets FULL_CONTROL. - | | The AllUsers group gets READ access. - | public-read-write | Owner gets FULL_CONTROL. - | | The AllUsers group gets READ and WRITE access. - | | Granting this on a bucket is generally not recommended. - | authenticated-read | Owner gets FULL_CONTROL. - | | The AuthenticatedUsers group gets READ access. - --upload-cutoff Cutoff for switching to chunked upload. @@ -168,6 +148,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. 
+ + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -207,6 +207,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -347,6 +350,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -359,7 +367,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -461,6 +469,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. 
+ + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -486,16 +518,15 @@ DESCRIPTION: OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] - --endpoint value Endpoint for Synology C2 Object Storage API. [$ENDPOINT] + --endpoint value Endpoint for S3 API. [$ENDPOINT] --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] --help, -h show help --location-constraint value Location constraint - must be set to match the Region. [$LOCATION_CONSTRAINT] - --region value Region where your data stored. [$REGION] + --region value Region to connect to. [$REGION] --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] @@ -518,18 +549,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. 
[$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. 
(default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/create/s3/tencentcos.md b/docs/en/cli-reference/storage/create/s3/tencentcos.md index 9ecc4ddf3..d62839d6e 100644 --- a/docs/en/cli-reference/storage/create/s3/tencentcos.md +++ b/docs/en/cli-reference/storage/create/s3/tencentcos.md @@ -29,14 +29,15 @@ DESCRIPTION: Leave blank for anonymous access or runtime credentials. --endpoint - Endpoint for Tencent COS API. + Endpoint for S3 API. + + Required when using an S3 clone. Examples: | cos.ap-beijing.myqcloud.com | Beijing Region | cos.ap-nanjing.myqcloud.com | Nanjing Region | cos.ap-shanghai.myqcloud.com | Shanghai Region | cos.ap-guangzhou.myqcloud.com | Guangzhou Region - | cos.ap-nanjing.myqcloud.com | Nanjing Region | cos.ap-chengdu.myqcloud.com | Chengdu Region | cos.ap-chongqing.myqcloud.com | Chongqing Region | cos.ap-hongkong.myqcloud.com | Hong Kong (China) Region @@ -94,13 +95,10 @@ DESCRIPTION: | | The AuthenticatedUsers group gets READ access. --storage-class - The storage class to use when storing new objects in Tencent COS. + The storage class to use when storing new objects in S3. Examples: - | | Default - | STANDARD | Standard storage class - | ARCHIVE | Archive storage mode - | STANDARD_IA | Infrequent access storage mode + | ARCHIVE | Archive storage mode --upload-cutoff Cutoff for switching to chunked upload. @@ -193,6 +191,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. 
@@ -232,6 +250,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -372,6 +393,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -384,7 +410,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -486,6 +512,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -512,11 +562,11 @@ DESCRIPTION: OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] --acl value Canned ACL used when creating buckets and storing or copying objects. 
[$ACL] - --endpoint value Endpoint for Tencent COS API. [$ENDPOINT] + --endpoint value Endpoint for S3 API. [$ENDPOINT] --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] --help, -h show help --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] - --storage-class value The storage class to use when storing new objects in Tencent COS. [$STORAGE_CLASS] + --storage-class value The storage class to use when storing new objects in S3. [$STORAGE_CLASS] Advanced @@ -543,18 +593,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. 
(default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/create/s3/wasabi.md b/docs/en/cli-reference/storage/create/s3/wasabi.md index 090c55877..e423bea34 100644 --- a/docs/en/cli-reference/storage/create/s3/wasabi.md +++ b/docs/en/cli-reference/storage/create/s3/wasabi.md @@ -33,12 +33,6 @@ DESCRIPTION: Leave blank if you are using an S3 clone and you don't have a region. - Examples: - | | Use this if unsure. - | | Will use v4 signatures and an empty region. - | other-v2-signature | Use this only if v4 signatures don't work. - | | E.g. pre Jewel/v10 CEPH. 
- --endpoint Endpoint for S3 API. @@ -54,6 +48,7 @@ DESCRIPTION: | s3.eu-central-2.wasabisys.com | Wasabi EU Central 2 (Frankfurt) | s3.eu-west-1.wasabisys.com | Wasabi EU West 1 (London) | s3.eu-west-2.wasabisys.com | Wasabi EU West 2 (Paris) + | s3.eu-south-1.wasabisys.com | Wasabi EU South 1 (Milan) | s3.ap-northeast-1.wasabisys.com | Wasabi AP Northeast 1 (Tokyo) endpoint | s3.ap-northeast-2.wasabisys.com | Wasabi AP Northeast 2 (Osaka) endpoint | s3.ap-southeast-1.wasabisys.com | Wasabi AP Southeast 1 (Singapore) @@ -192,6 +187,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -231,6 +246,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -371,6 +389,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -383,7 +406,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. 
+ See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -485,6 +508,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -543,18 +590,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. 
(default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. 
(default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/create/s3/zata.md b/docs/en/cli-reference/storage/create/s3/zata.md new file mode 100644 index 000000000..4f56a4605 --- /dev/null +++ b/docs/en/cli-reference/storage/create/s3/zata.md @@ -0,0 +1,638 @@ +# Zata (S3 compatible Gateway) + +{% code fullWidth="true" %} +``` +NAME: + singularity storage create s3 zata - Zata (S3 compatible Gateway) + +USAGE: + singularity storage create s3 zata [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --region + Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. + + Examples: + | us-east-1 | Indore, Madhya Pradesh, India + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. + + Examples: + | idr01.zata.ai | South Asia Endpoint + + --location-constraint + Location constraint - must be set to match the Region. + + Leave blank if not sure. Used when creating buckets only. + + --acl + Canned ACL used when creating buckets and storing or copying objects. + + This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when server-side copying objects as S3 + doesn't copy the ACL from the source but rather writes a fresh one. 
+ + If the acl is an empty string then no X-Amz-Acl: header is added and + the default (private) will be used. + + + --bucket-acl + Canned ACL used when creating buckets. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. + | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. 
Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. 
+ + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. 
+ + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --use-arn-region + If true, enables arn region support for the service. + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. 
Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If an source object of unknown length is uploaded then rclone **will** do a + HEAD request. + + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. 
+ This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-options) for valid formats. 
+ + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. 
+ + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --location-constraint value Location constraint - must be set to match the Region. [$LOCATION_CONSTRAINT] + --region value Region to connect to. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. 
[$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. 
[$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. 
(default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. 
key=value) + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) + + General + + --name value Name of the storage (default: Auto generated) + --path value Path of the storage + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/create/seafile.md b/docs/en/cli-reference/storage/create/seafile.md index 3b1d30ee2..7cdef8485 100644 --- a/docs/en/cli-reference/storage/create/seafile.md +++ b/docs/en/cli-reference/storage/create/seafile.md @@ -63,7 +63,7 @@ OPTIONS: --create-library Should rclone create a library if it doesn't exist. (default: false) [$CREATE_LIBRARY] --description value Description of the remote. [$DESCRIPTION] - --encoding value The encoding for the backend. (default: "Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8") [$ENCODING] + --encoding value The encoding for the backend. 
(default: "Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8,Dot") [$ENCODING] Client Config diff --git a/docs/en/cli-reference/storage/create/sftp.md b/docs/en/cli-reference/storage/create/sftp.md index 1cbec753d..26f92a393 100644 --- a/docs/en/cli-reference/storage/create/sftp.md +++ b/docs/en/cli-reference/storage/create/sftp.md @@ -49,6 +49,11 @@ DESCRIPTION: Only PEM encrypted key files (old OpenSSH format) are supported. Encrypted keys in the new OpenSSH format can't be used. + --pubkey + SSH public certificate for public certificate based authentication. + Set this if you have a signed certificate you want to use for authentication. + If specified will override pubkey_file. + --pubkey-file Optional path to public key file. @@ -147,13 +152,41 @@ DESCRIPTION: | powershell | PowerShell | cmd | Windows Command Prompt + --hashes + Comma separated list of supported checksum types. + --md5sum-command - The command used to read md5 hashes. + The command used to read MD5 hashes. Leave blank for autodetect. --sha1sum-command - The command used to read sha1 hashes. + The command used to read SHA-1 hashes. + + Leave blank for autodetect. + + --crc32sum-command + The command used to read CRC-32 hashes. + + Leave blank for autodetect. + + --sha256sum-command + The command used to read SHA-256 hashes. + + Leave blank for autodetect. + + --blake3sum-command + The command used to read BLAKE3 hashes. + + Leave blank for autodetect. + + --xxh3sum-command + The command used to read XXH3 hashes. + + Leave blank for autodetect. + + --xxh128sum-command + The command used to read XXH128 hashes. Leave blank for autodetect. @@ -375,6 +408,18 @@ DESCRIPTION: myUser:myPass@localhost:9005 + --http-proxy + URL for HTTP CONNECT proxy + + Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb. + + Supports the format http://user:pass@host:port, http://host:port, http://host. 
+ + Example: + + http://myUser:myPass@proxyhostname.example.com:8000 + + --copy-is-hardlink Set to enable server side copies using hardlinks. @@ -407,6 +452,7 @@ OPTIONS: --key-use-agent When set forces the usage of the ssh-agent. (default: false) [$KEY_USE_AGENT] --pass value SSH password, leave blank to use ssh-agent. [$PASS] --port value SSH port number. (default: 22) [$PORT] + --pubkey value SSH public certificate for public certificate based authentication. [$PUBKEY] --pubkey-file value Optional path to public key file. [$PUBKEY_FILE] --ssh value Path and arguments to external ssh binary. [$SSH] --use-insecure-cipher Enable the use of insecure ciphers and key exchange methods. (default: false) [$USE_INSECURE_CIPHER] @@ -415,30 +461,37 @@ OPTIONS: Advanced --ask-password Allow asking for SFTP password when needed. (default: false) [$ASK_PASSWORD] + --blake3sum-command value The command used to read BLAKE3 hashes. [$BLAKE3SUM_COMMAND] --chunk-size value Upload and download chunk size. (default: "32Ki") [$CHUNK_SIZE] --ciphers value Space separated list of ciphers to be used for session encryption, ordered by preference. [$CIPHERS] --concurrency value The maximum number of outstanding requests for one file (default: 64) [$CONCURRENCY] --connections value Maximum number of SFTP simultaneous connections, 0 for unlimited. (default: 0) [$CONNECTIONS] --copy-is-hardlink Set to enable server side copies using hardlinks. (default: false) [$COPY_IS_HARDLINK] + --crc32sum-command value The command used to read CRC-32 hashes. [$CRC32SUM_COMMAND] --description value Description of the remote. [$DESCRIPTION] --disable-concurrent-reads If set don't use concurrent reads. (default: false) [$DISABLE_CONCURRENT_READS] --disable-concurrent-writes If set don't use concurrent writes. (default: false) [$DISABLE_CONCURRENT_WRITES] + --hashes value Comma separated list of supported checksum types. 
[$HASHES] --host-key-algorithms value Space separated list of host key algorithms, ordered by preference. [$HOST_KEY_ALGORITHMS] + --http-proxy value URL for HTTP CONNECT proxy [$HTTP_PROXY] --idle-timeout value Max time before closing idle connections. (default: "1m0s") [$IDLE_TIMEOUT] --key-exchange value Space separated list of key exchange algorithms, ordered by preference. [$KEY_EXCHANGE] --known-hosts-file value Optional path to known_hosts file. [$KNOWN_HOSTS_FILE] --macs value Space separated list of MACs (message authentication code) algorithms, ordered by preference. [$MACS] - --md5sum-command value The command used to read md5 hashes. [$MD5SUM_COMMAND] + --md5sum-command value The command used to read MD5 hashes. [$MD5SUM_COMMAND] --path-override value Override path used by SSH shell commands. [$PATH_OVERRIDE] --server-command value Specifies the path or command to run a sftp server on the remote host. [$SERVER_COMMAND] --set-env value Environment variables to pass to sftp and commands [$SET_ENV] --set-modtime Set the modified time on the remote if set. (default: true) [$SET_MODTIME] - --sha1sum-command value The command used to read sha1 hashes. [$SHA1SUM_COMMAND] + --sha1sum-command value The command used to read SHA-1 hashes. [$SHA1SUM_COMMAND] + --sha256sum-command value The command used to read SHA-256 hashes. [$SHA256SUM_COMMAND] --shell-type value The type of SSH shell on remote server, if any. [$SHELL_TYPE] --skip-links Set to skip any symlinks and any other non regular files. (default: false) [$SKIP_LINKS] --socks-proxy value Socks 5 proxy host. [$SOCKS_PROXY] --subsystem value Specifies the SSH2 subsystem on the remote host. (default: "sftp") [$SUBSYSTEM] --use-fstat If set use fstat instead of stat. (default: false) [$USE_FSTAT] + --xxh128sum-command value The command used to read XXH128 hashes. [$XXH128SUM_COMMAND] + --xxh3sum-command value The command used to read XXH3 hashes. 
[$XXH3SUM_COMMAND] Client Config diff --git a/docs/en/cli-reference/storage/create/sharefile.md b/docs/en/cli-reference/storage/create/sharefile.md index 8429f03b2..8dbf5a84f 100644 --- a/docs/en/cli-reference/storage/create/sharefile.md +++ b/docs/en/cli-reference/storage/create/sharefile.md @@ -32,6 +32,13 @@ DESCRIPTION: Leave blank to use the provider defaults. + --client-credentials + Use client credentials OAuth flow. + + This will use the OAUTH2 client Credentials Flow as described in RFC 6749. + + Note that this option is NOT supported by all backends. + --upload-cutoff Cutoff for switching to multipart upload. @@ -84,6 +91,7 @@ OPTIONS: --auth-url value Auth server URL. [$AUTH_URL] --chunk-size value Upload chunk size. (default: "64Mi") [$CHUNK_SIZE] + --client-credentials Use client credentials OAuth flow. (default: false) [$CLIENT_CREDENTIALS] --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot") [$ENCODING] --endpoint value Endpoint for API calls. [$ENDPOINT] diff --git a/docs/en/cli-reference/storage/create/smb.md b/docs/en/cli-reference/storage/create/smb.md index 27b20ab82..26a630af0 100644 --- a/docs/en/cli-reference/storage/create/smb.md +++ b/docs/en/cli-reference/storage/create/smb.md @@ -37,6 +37,15 @@ DESCRIPTION: Leave blank if not sure. + --use-kerberos + Use Kerberos authentication. + + If set, rclone will use Kerberos authentication instead of NTLM. This + requires a valid Kerberos configuration and credentials cache to be + available, either in the default locations or as specified by the + KRB5_CONFIG and KRB5CCNAME environment variables. + + --idle-timeout Max time before closing idle connections. @@ -54,6 +63,19 @@ DESCRIPTION: Always true on Windows shares. + --kerberos-ccache + Path to the Kerberos credential cache (krb5cc). 
+ + Overrides the default KRB5CCNAME environment variable and allows this + instance of the SMB backend to use a different Kerberos cache file. + This is useful when mounting multiple SMB with different credentials + or running in multi-user environments. + + Supported formats: + - FILE:/path/to/ccache – Use the specified file. + - DIR:/path/to/ccachedir – Use the primary file inside the specified directory. + - /path/to/ccache – Interpreted as a file path. + --encoding The encoding for the backend. @@ -70,15 +92,17 @@ OPTIONS: --pass value SMB password. [$PASS] --port value SMB port number. (default: 445) [$PORT] --spn value Service principal name. [$SPN] + --use-kerberos Use Kerberos authentication. (default: false) [$USE_KERBEROS] --user value SMB username. (default: "$USER") [$USER] Advanced - --case-insensitive Whether the server is configured to be case-insensitive. (default: true) [$CASE_INSENSITIVE] - --description value Description of the remote. [$DESCRIPTION] - --encoding value The encoding for the backend. (default: "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot") [$ENCODING] - --hide-special-share Hide special shares (e.g. print$) which users aren't supposed to access. (default: true) [$HIDE_SPECIAL_SHARE] - --idle-timeout value Max time before closing idle connections. (default: "1m0s") [$IDLE_TIMEOUT] + --case-insensitive Whether the server is configured to be case-insensitive. (default: true) [$CASE_INSENSITIVE] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot") [$ENCODING] + --hide-special-share Hide special shares (e.g. print$) which users aren't supposed to access. (default: true) [$HIDE_SPECIAL_SHARE] + --idle-timeout value Max time before closing idle connections. 
(default: "1m0s") [$IDLE_TIMEOUT] + --kerberos-ccache value Path to the Kerberos credential cache (krb5cc). [$KERBEROS_CCACHE] Client Config diff --git a/docs/en/cli-reference/storage/create/swift.md b/docs/en/cli-reference/storage/create/swift.md index 212622b39..21570abd3 100644 --- a/docs/en/cli-reference/storage/create/swift.md +++ b/docs/en/cli-reference/storage/create/swift.md @@ -126,7 +126,7 @@ DESCRIPTION: --chunk-size Above this size files will be chunked. - Above this size files will be chunked into a a `_segments` container + Above this size files will be chunked into a `_segments` container or a `.file-segments` directory. (See the `use_segments_container` option for more info). Default for this is 5 GiB which is its maximum value, which means only files above this size will be chunked. diff --git a/docs/en/cli-reference/storage/create/uptobox.md b/docs/en/cli-reference/storage/create/uptobox.md deleted file mode 100644 index 6dcca5eec..000000000 --- a/docs/en/cli-reference/storage/create/uptobox.md +++ /dev/null @@ -1,69 +0,0 @@ -# Uptobox - -{% code fullWidth="true" %} -``` -NAME: - singularity storage create uptobox - Uptobox - -USAGE: - singularity storage create uptobox [command options] - -DESCRIPTION: - --access-token - Your access token. - - Get it from https://uptobox.com/my_account. - - --private - Set to make uploaded files private - - --encoding - The encoding for the backend. - - See the [encoding section in the overview](/overview/#encoding) for more info. - - --description - Description of the remote. - - -OPTIONS: - --access-token value Your access token. [$ACCESS_TOKEN] - --help, -h show help - - Advanced - - --description value Description of the remote. [$DESCRIPTION] - --encoding value The encoding for the backend. 
(default: "Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot") [$ENCODING] - --private Set to make uploaded files private (default: false) [$PRIVATE] - - Client Config - - --client-ca-cert value Path to CA certificate used to verify servers - --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth - --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) - --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) - --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value) - --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) - --client-key value Path to Client SSL private key (PEM) for mutual TLS auth - --client-no-gzip Don't set Accept-Encoding: gzip (default: false) - --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) - --client-timeout value IO idle timeout (default: 5m0s) - --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone default) - - General - - --name value Name of the storage (default: Auto generated) - --path value Path of the storage - - Retry Strategy - - --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) - --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) - --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) - --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) - --client-retry-max value Max number of retries for IO read errors (default: 10) - --client-skip-inaccessible Skip inaccessible files when opening (default: false) - -``` -{% endcode %} diff --git 
a/docs/en/cli-reference/storage/create/webdav.md b/docs/en/cli-reference/storage/create/webdav.md index 61570883e..4fb27540a 100644 --- a/docs/en/cli-reference/storage/create/webdav.md +++ b/docs/en/cli-reference/storage/create/webdav.md @@ -20,7 +20,8 @@ DESCRIPTION: Examples: | fastmail | Fastmail Files | nextcloud | Nextcloud - | owncloud | Owncloud + | owncloud | Owncloud 10 PHP based WebDAV server + | infinitescale | ownCloud Infinite Scale | sharepoint | Sharepoint Online, authenticated by Microsoft account | sharepoint-ntlm | Sharepoint with NTLM authentication, usually self-hosted or on-premises | rclone | rclone WebDAV server to serve a remote over HTTP via the WebDAV protocol @@ -81,6 +82,21 @@ DESCRIPTION: --unix-socket Path to a unix domain socket to dial to, instead of opening a TCP connection directly + --auth-redirect + Preserve authentication on redirect. + + If the server redirects rclone to a new domain when it is trying to + read a file then normally rclone will drop the Authorization: header + from the request. + + This is standard security practice to avoid sending your credentials + to an unknown webserver. + + However this is desirable in some circumstances. If you are getting + an error like "401 Unauthorized" when rclone is attempting to read + files from the webdav server then you can try this option. + + --description Description of the remote. @@ -95,6 +111,7 @@ OPTIONS: Advanced + --auth-redirect Preserve authentication on redirect. (default: false) [$AUTH_REDIRECT] --bearer-token-command value Command to run to get a bearer token. [$BEARER_TOKEN_COMMAND] --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. 
[$ENCODING] diff --git a/docs/en/cli-reference/storage/create/yandex.md b/docs/en/cli-reference/storage/create/yandex.md index 1a059e42e..7d5118c9f 100644 --- a/docs/en/cli-reference/storage/create/yandex.md +++ b/docs/en/cli-reference/storage/create/yandex.md @@ -32,6 +32,13 @@ DESCRIPTION: Leave blank to use the provider defaults. + --client-credentials + Use client credentials OAuth flow. + + This will use the OAUTH2 client Credentials Flow as described in RFC 6749. + + Note that this option is NOT supported by all backends. + --hard-delete Delete files permanently rather than putting them into the trash. @@ -54,13 +61,14 @@ OPTIONS: Advanced - --auth-url value Auth server URL. [$AUTH_URL] - --description value Description of the remote. [$DESCRIPTION] - --encoding value The encoding for the backend. (default: "Slash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] - --hard-delete Delete files permanently rather than putting them into the trash. (default: false) [$HARD_DELETE] - --spoof-ua Set the user agent to match an official version of the yandex disk client. May help with upload performance. (default: true) [$SPOOF_UA] - --token value OAuth Access Token as a JSON blob. [$TOKEN] - --token-url value Token server url. [$TOKEN_URL] + --auth-url value Auth server URL. [$AUTH_URL] + --client-credentials Use client credentials OAuth flow. (default: false) [$CLIENT_CREDENTIALS] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Slash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] + --hard-delete Delete files permanently rather than putting them into the trash. (default: false) [$HARD_DELETE] + --spoof-ua Set the user agent to match an official version of the yandex disk client. May help with upload performance. (default: true) [$SPOOF_UA] + --token value OAuth Access Token as a JSON blob. [$TOKEN] + --token-url value Token server url. 
[$TOKEN_URL] Client Config diff --git a/docs/en/cli-reference/storage/create/zoho.md b/docs/en/cli-reference/storage/create/zoho.md index f4d04f9cc..8c854ed65 100644 --- a/docs/en/cli-reference/storage/create/zoho.md +++ b/docs/en/cli-reference/storage/create/zoho.md @@ -32,6 +32,13 @@ DESCRIPTION: Leave blank to use the provider defaults. + --client-credentials + Use client credentials OAuth flow. + + This will use the OAUTH2 client Credentials Flow as described in RFC 6749. + + Note that this option is NOT supported by all backends. + --region Zoho region to connect to. @@ -47,6 +54,9 @@ DESCRIPTION: | com.cn | China | com.au | Australia + --upload-cutoff + Cutoff for switching to large file upload api (>= 10 MiB). + --encoding The encoding for the backend. @@ -64,11 +74,13 @@ OPTIONS: Advanced - --auth-url value Auth server URL. [$AUTH_URL] - --description value Description of the remote. [$DESCRIPTION] - --encoding value The encoding for the backend. (default: "Del,Ctl,InvalidUtf8") [$ENCODING] - --token value OAuth Access Token as a JSON blob. [$TOKEN] - --token-url value Token server url. [$TOKEN_URL] + --auth-url value Auth server URL. [$AUTH_URL] + --client-credentials Use client credentials OAuth flow. (default: false) [$CLIENT_CREDENTIALS] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Del,Ctl,InvalidUtf8") [$ENCODING] + --token value OAuth Access Token as a JSON blob. [$TOKEN] + --token-url value Token server url. [$TOKEN_URL] + --upload-cutoff value Cutoff for switching to large file upload api (>= 10 MiB). 
(default: "10Mi") [$UPLOAD_CUTOFF] Client Config diff --git a/docs/en/cli-reference/storage/update/README.md b/docs/en/cli-reference/storage/update/README.md index 80eb6cba5..66e8bbb49 100644 --- a/docs/en/cli-reference/storage/update/README.md +++ b/docs/en/cli-reference/storage/update/README.md @@ -36,7 +36,7 @@ COMMANDS: premiumizeme premiumize.me putio Put.io qingstor QingCloud Object Storage - s3 Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, ChinaMobile, Cloudflare, DigitalOcean, Dreamhost, GCS, HuaweiOBS, IBMCOS, IDrive, IONOS, LyveCloud, Leviia, Liara, Linode, Magalu, Minio, Netease, Petabox, RackCorp, Rclone, Scaleway, SeaweedFS, StackPath, Storj, Synology, TencentCOS, Wasabi, Qiniu and others + s3 Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, BizflyCloud, Ceph, ChinaMobile, Cloudflare, Cubbit, DigitalOcean, Dreamhost, Exaba, FileLu, FlashBlade, GCS, Hetzner, HuaweiOBS, IBMCOS, IDrive, Intercolo, IONOS, Leviia, Liara, Linode, LyveCloud, Magalu, Mega, Minio, Netease, Outscale, OVHcloud, Petabox, Qiniu, Rabata, RackCorp, Rclone, Scaleway, SeaweedFS, Selectel, Servercore, SpectraLogic, StackPath, Storj, Synology, TencentCOS, Wasabi, Zata, Other seafile seafile sftp SSH/SFTP sharefile Citrix Sharefile @@ -46,7 +46,6 @@ COMMANDS: sugarsync Sugarsync swift OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH) union Union merges the contents of several upstream fs - uptobox Uptobox webdav WebDAV yandex Yandex Disk zoho Zoho diff --git a/docs/en/cli-reference/storage/update/azureblob.md b/docs/en/cli-reference/storage/update/azureblob.md index 126ab09a0..0841026e1 100644 --- a/docs/en/cli-reference/storage/update/azureblob.md +++ b/docs/en/cli-reference/storage/update/azureblob.md @@ -35,6 +35,12 @@ DESCRIPTION: Leave blank if using account/key or Emulator. + --connection-string + Storage Connection String. + + Connection string for the storage. 
Leave blank if using other auth methods. + + --tenant ID of the service principal's tenant. Also called its directory ID. @@ -118,6 +124,20 @@ DESCRIPTION: keys instead of setting `service_principal_file`. + --disable-instance-discovery + Skip requesting Microsoft Entra instance metadata + + This should be set true only by applications authenticating in + disconnected clouds, or private clouds such as Azure Stack. + + It determines whether rclone requests Microsoft Entra instance + metadata from `https://login.microsoft.com/` before + authenticating. + + Setting this to true will skip this request, making you responsible + for ensuring the configured authority is valid and trustworthy. + + --use-msi Use a managed service identity to authenticate (only works in Azure). @@ -150,6 +170,18 @@ DESCRIPTION: Leave blank if using real azure storage endpoint. + --use-az + Use Azure CLI tool az for authentication + + Set to use the [Azure CLI tool az](https://learn.microsoft.com/en-us/cli/azure/) + as the sole means of authentication. + + Setting this can be useful if you wish to use the az CLI on a host with + a System Managed Identity that you do not want to use. + + Don't set env_auth at the same time. + + --endpoint Endpoint for the service. @@ -183,6 +215,41 @@ DESCRIPTION: "--transfers" * "--azureblob-upload-concurrency" chunks stored at once in memory. + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of chunk_size using the put block list API. + + Files smaller than this limit will be copied with the Copy Blob API. + + --copy-concurrency + Concurrency for multipart copy. + + This is the number of chunks of the same file that are copied + concurrently. + + These chunks are not buffered in memory and Microsoft recommends + setting this value to greater than 1000 in the azcopy documentation. 
+ + https://learn.microsoft.com/en-us/azure/storage/common/storage-use-azcopy-optimize#increase-concurrency + + In tests, copy speed increases almost linearly with copy + concurrency. + + --use-copy-blob + Whether to use the Copy Blob API when copying to the same storage account. + + If true (the default) then rclone will use the Copy Blob API for + copies to the same storage account even when the size is above the + copy_cutoff. + + Rclone assumes that the same storage account means the same config + and does not check for the same storage account in different configs. + + There should be no need to change this value. + + --list-chunk Size of blob list. @@ -290,6 +357,7 @@ OPTIONS: --client-certificate-path value Path to a PEM or PKCS12 certificate file including the private key. [$CLIENT_CERTIFICATE_PATH] --client-id value The ID of the client in use. [$CLIENT_ID] --client-secret value One of the service principal's client secrets [$CLIENT_SECRET] + --connection-string value Storage Connection String. [$CONNECTION_STRING] --env-auth Read credentials from runtime (environment variables, CLI or MSI). (default: false) [$ENV_AUTH] --help, -h show help --key value Storage Account Shared Key. [$KEY] @@ -302,10 +370,13 @@ OPTIONS: --archive-tier-delete Delete archive tier blobs before overwriting. (default: false) [$ARCHIVE_TIER_DELETE] --chunk-size value Upload chunk size. (default: "4Mi") [$CHUNK_SIZE] --client-send-certificate-chain Send the certificate chain when using certificate auth. (default: false) [$CLIENT_SEND_CERTIFICATE_CHAIN] + --copy-concurrency value Concurrency for multipart copy. (default: 512) [$COPY_CONCURRENCY] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "8Mi") [$COPY_CUTOFF] --delete-snapshots value Set to specify how to deal with snapshots on blob deletion. [$DELETE_SNAPSHOTS] --description value Description of the remote. 
[$DESCRIPTION] --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-instance-discovery Skip requesting Microsoft Entra instance metadata (default: false) [$DISABLE_INSTANCE_DISCOVERY] --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8") [$ENCODING] --endpoint value Endpoint for the service. [$ENDPOINT] --list-chunk value Size of blob list. (default: 5000) [$LIST_CHUNK] @@ -321,6 +392,8 @@ OPTIONS: --service-principal-file value Path to file containing credentials for use with a service principal. [$SERVICE_PRINCIPAL_FILE] --upload-concurrency value Concurrency for multipart uploads. (default: 16) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload (<= 256 MiB) (deprecated). [$UPLOAD_CUTOFF] + --use-az Use Azure CLI tool az for authentication (default: false) [$USE_AZ] + --use-copy-blob Whether to use the Copy Blob API when copying to the same storage account. (default: true) [$USE_COPY_BLOB] --use-emulator Uses local storage emulator if provided as 'true'. (default: false) [$USE_EMULATOR] --use-msi Use a managed service identity to authenticate (only works in Azure). (default: false) [$USE_MSI] --username value User name (usually an email address) [$USERNAME] diff --git a/docs/en/cli-reference/storage/update/b2.md b/docs/en/cli-reference/storage/update/b2.md index b8dd1f581..abccff91b 100644 --- a/docs/en/cli-reference/storage/update/b2.md +++ b/docs/en/cli-reference/storage/update/b2.md @@ -152,6 +152,38 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --sse-customer-algorithm + If using SSE-C, the server-side encryption algorithm used when storing this object in B2. 
+ + Examples: + | | None + | AES256 | Advanced Encryption Standard (256 bits key length) + + --sse-customer-key + To use SSE-C, you may provide the secret encryption key encoded in a UTF-8 compatible string to encrypt/decrypt your data + + Alternatively you can provide --sse-customer-key-base64. + + Examples: + | | None + + --sse-customer-key-base64 + To use SSE-C, you may provide the secret encryption key encoded in Base64 format to encrypt/decrypt your data + + Alternatively you can provide --sse-customer-key. + + Examples: + | | None + + --sse-customer-key-md5 + If using SSE-C you may provide the secret encryption key MD5 checksum (optional). + + If you leave it blank, this is calculated automatically from the sse_customer_key provided. + + + Examples: + | | None + --description Description of the remote. @@ -164,22 +196,26 @@ OPTIONS: Advanced - --chunk-size value Upload chunk size. (default: "96Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4Gi") [$COPY_CUTOFF] - --description value Description of the remote. [$DESCRIPTION] - --disable-checksum Disable checksums for large (> upload cutoff) files. (default: false) [$DISABLE_CHECKSUM] - --download-auth-duration value Time before the public link authorization token will expire in s or suffix ms|s|m|h|d. (default: "1w") [$DOWNLOAD_AUTH_DURATION] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] - --endpoint value Endpoint for the service. [$ENDPOINT] - --lifecycle value Set the number of days deleted files should be kept when creating a bucket. (default: 0) [$LIFECYCLE] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. 
(no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] - --test-mode value A flag string for X-Bz-Test-Mode header for debugging. [$TEST_MODE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --chunk-size value Upload chunk size. (default: "96Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4Gi") [$COPY_CUTOFF] + --description value Description of the remote. [$DESCRIPTION] + --disable-checksum Disable checksums for large (> upload cutoff) files. (default: false) [$DISABLE_CHECKSUM] + --download-auth-duration value Time before the public link authorization token will expire in s or suffix ms|s|m|h|d. (default: "1w") [$DOWNLOAD_AUTH_DURATION] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] + --endpoint value Endpoint for the service. [$ENDPOINT] + --lifecycle value Set the number of days deleted files should be kept when creating a bucket. (default: 0) [$LIFECYCLE] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --sse-customer-algorithm value If using SSE-C, the server-side encryption algorithm used when storing this object in B2. 
[$SSE_CUSTOMER_ALGORITHM] + --sse-customer-key value To use SSE-C, you may provide the secret encryption key encoded in a UTF-8 compatible string to encrypt/decrypt your data [$SSE_CUSTOMER_KEY] + --sse-customer-key-base64 value To use SSE-C, you may provide the secret encryption key encoded in Base64 format to encrypt/decrypt your data [$SSE_CUSTOMER_KEY_BASE64] + --sse-customer-key-md5 value If using SSE-C you may provide the secret encryption key MD5 checksum (optional). [$SSE_CUSTOMER_KEY_MD5] + --test-mode value A flag string for X-Bz-Test-Mode header for debugging. [$TEST_MODE] + --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config diff --git a/docs/en/cli-reference/storage/update/box.md b/docs/en/cli-reference/storage/update/box.md index 5fa04e82e..15fdcbb6f 100644 --- a/docs/en/cli-reference/storage/update/box.md +++ b/docs/en/cli-reference/storage/update/box.md @@ -32,6 +32,13 @@ DESCRIPTION: Leave blank to use the provider defaults. + --client-credentials + Use client credentials OAuth flow. + + This will use the OAUTH2 client Credentials Flow as described in RFC 6749. + + Note that this option is NOT supported by all backends. + --root-folder-id Fill in for rclone to use a non root folder as its starting point. @@ -42,6 +49,11 @@ DESCRIPTION: Leading `~` will be expanded in the file name as will environment variables such as `${RCLONE_CONFIG_DIR}`. + --config-credentials + Box App config.json contents. + + Leave blank normally. 
+ --access-token Box App Primary Access Token @@ -90,16 +102,18 @@ DESCRIPTION: OPTIONS: - --access-token value Box App Primary Access Token [$ACCESS_TOKEN] - --box-config-file value Box App config.json location [$BOX_CONFIG_FILE] - --box-sub-type value (default: "user") [$BOX_SUB_TYPE] - --client-id value OAuth Client Id. [$CLIENT_ID] - --client-secret value OAuth Client Secret. [$CLIENT_SECRET] - --help, -h show help + --access-token value Box App Primary Access Token [$ACCESS_TOKEN] + --box-config-file value Box App config.json location [$BOX_CONFIG_FILE] + --box-sub-type value (default: "user") [$BOX_SUB_TYPE] + --client-id value OAuth Client Id. [$CLIENT_ID] + --client-secret value OAuth Client Secret. [$CLIENT_SECRET] + --config-credentials value Box App config.json contents. [$CONFIG_CREDENTIALS] + --help, -h show help Advanced --auth-url value Auth server URL. [$AUTH_URL] + --client-credentials Use client credentials OAuth flow. (default: false) [$CLIENT_CREDENTIALS] --commit-retries value Max number of times to try committing a multipart file. (default: 100) [$COMMIT_RETRIES] --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot") [$ENCODING] diff --git a/docs/en/cli-reference/storage/update/drive.md b/docs/en/cli-reference/storage/update/drive.md index d6dc12528..57a3b512d 100644 --- a/docs/en/cli-reference/storage/update/drive.md +++ b/docs/en/cli-reference/storage/update/drive.md @@ -33,6 +33,13 @@ DESCRIPTION: Leave blank to use the provider defaults. + --client-credentials + Use client credentials OAuth flow. + + This will use the OAUTH2 client Credentials Flow as described in RFC 6749. + + Note that this option is NOT supported by all backends. + --scope Comma separated list of scopes that rclone should use when requesting access from drive. 
@@ -415,6 +422,15 @@ DESCRIPTION: | failok | If writing fails log errors only, don't fail the transfer | read,write | Read and Write the value. + --metadata-enforce-expansive-access + Whether the request should enforce expansive access rules. + + From Feb 2026 this flag will be set by default so this flag can be used for + testing before then. + + See: https://developers.google.com/workspace/drive/api/guides/limited-expansive-access + + --encoding The encoding for the backend. @@ -448,6 +464,7 @@ OPTIONS: --auth-owner-only Only consider files owned by the authenticated user. (default: false) [$AUTH_OWNER_ONLY] --auth-url value Auth server URL. [$AUTH_URL] --chunk-size value Upload chunk size. (default: "8Mi") [$CHUNK_SIZE] + --client-credentials Use client credentials OAuth flow. (default: false) [$CLIENT_CREDENTIALS] --copy-shortcut-content Server side copy contents of shortcuts instead of the shortcut. (default: false) [$COPY_SHORTCUT_CONTENT] --description value Description of the remote. [$DESCRIPTION] --disable-http2 Disable drive using http2. (default: true) [$DISABLE_HTTP2] @@ -460,6 +477,7 @@ OPTIONS: --import-formats value Comma separated list of preferred formats for uploading Google docs. [$IMPORT_FORMATS] --keep-revision-forever Keep new head revision of each file forever. (default: false) [$KEEP_REVISION_FOREVER] --list-chunk value Size of listing chunk 100-1000, 0 to disable. (default: 1000) [$LIST_CHUNK] + --metadata-enforce-expansive-access Whether the request should enforce expansive access rules. (default: false) [$METADATA_ENFORCE_EXPANSIVE_ACCESS] --metadata-labels value Control whether labels should be read or written in metadata. (default: "off") [$METADATA_LABELS] --metadata-owner value Control whether owner should be read or written in metadata. (default: "read") [$METADATA_OWNER] --metadata-permissions value Control whether permissions should be read or written in metadata. 
(default: "off") [$METADATA_PERMISSIONS] diff --git a/docs/en/cli-reference/storage/update/dropbox.md b/docs/en/cli-reference/storage/update/dropbox.md index 2ad349fa4..59d1eb670 100644 --- a/docs/en/cli-reference/storage/update/dropbox.md +++ b/docs/en/cli-reference/storage/update/dropbox.md @@ -32,6 +32,13 @@ DESCRIPTION: Leave blank to use the provider defaults. + --client-credentials + Use client credentials OAuth flow. + + This will use the OAUTH2 client Credentials Flow as described in RFC 6749. + + Note that this option is NOT supported by all backends. + --chunk-size Upload chunk size (< 150Mi). @@ -95,6 +102,32 @@ DESCRIPTION: --root-namespace Specify a different Dropbox namespace ID to use as the root for all paths. + --export-formats + Comma separated list of preferred formats for exporting files + + Certain Dropbox files can only be accessed by exporting them to another format. + These include Dropbox Paper documents. + + For each such file, rclone will choose the first format on this list that Dropbox + considers valid. If none is valid, it will choose Dropbox's default format. + + Known formats include: "html", "md" (markdown) + + --skip-exports + Skip exportable files in all listings. + + If given, exportable files practically become invisible to rclone. + + --show-all-exports + Show all exportable files in listings. + + Adding this flag will allow all exportable files to be server side copied. + Note that rclone doesn't add extensions to the exportable file names in this mode. + + Do **not** use this flag when trying to download exportable files - rclone + will fail to download them. + + --batch-mode Upload file batching sync|async|off. @@ -147,7 +180,7 @@ DESCRIPTION: --batch-commit-timeout - Max time to wait for a batch to finish committing + Max time to wait for a batch to finish committing. (no longer used) --description Description of the remote. @@ -161,18 +194,22 @@ OPTIONS: Advanced --auth-url value Auth server URL. 
[$AUTH_URL] - --batch-commit-timeout value Max time to wait for a batch to finish committing (default: "10m0s") [$BATCH_COMMIT_TIMEOUT] + --batch-commit-timeout value Max time to wait for a batch to finish committing. (no longer used) (default: "10m0s") [$BATCH_COMMIT_TIMEOUT] --batch-mode value Upload file batching sync|async|off. (default: "sync") [$BATCH_MODE] --batch-size value Max number of files in upload batch. (default: 0) [$BATCH_SIZE] --batch-timeout value Max time to allow an idle upload batch before uploading. (default: "0s") [$BATCH_TIMEOUT] --chunk-size value Upload chunk size (< 150Mi). (default: "48Mi") [$CHUNK_SIZE] + --client-credentials Use client credentials OAuth flow. (default: false) [$CLIENT_CREDENTIALS] --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot") [$ENCODING] + --export-formats value Comma separated list of preferred formats for exporting files (default: "html,md") [$EXPORT_FORMATS] --impersonate value Impersonate this user when using a business account. [$IMPERSONATE] --pacer-min-sleep value Minimum time to sleep between API calls. (default: "10ms") [$PACER_MIN_SLEEP] --root-namespace value Specify a different Dropbox namespace ID to use as the root for all paths. [$ROOT_NAMESPACE] --shared-files Instructs rclone to work on individual shared files. (default: false) [$SHARED_FILES] --shared-folders Instructs rclone to work on shared folders. (default: false) [$SHARED_FOLDERS] + --show-all-exports Show all exportable files in listings. (default: false) [$SHOW_ALL_EXPORTS] + --skip-exports Skip exportable files in all listings. (default: false) [$SKIP_EXPORTS] --token value OAuth Access Token as a JSON blob. [$TOKEN] --token-url value Token server url. 
[$TOKEN_URL] diff --git a/docs/en/cli-reference/storage/update/ftp.md b/docs/en/cli-reference/storage/update/ftp.md index 8a21ba7d0..3b989343d 100644 --- a/docs/en/cli-reference/storage/update/ftp.md +++ b/docs/en/cli-reference/storage/update/ftp.md @@ -95,6 +95,14 @@ DESCRIPTION: --disable-tls13 Disable TLS 1.3 (workaround for FTP servers with buggy TLS) + --allow-insecure-tls-ciphers + Allow insecure TLS ciphers + + Setting this flag will allow the usage of the following TLS ciphers in addition to the secure defaults: + + - TLS_RSA_WITH_AES_128_GCM_SHA256 + + --shut-timeout Maximum time to wait for data connection closing status. @@ -107,12 +115,38 @@ DESCRIPTION: --socks-proxy Socks 5 proxy host. - Supports the format user:pass@host:port, user@host:port, host:port. - - Example: + Supports the format user:pass@host:port, user@host:port, host:port. - myUser:myPass@localhost:9005 + Example: + myUser:myPass@localhost:9005 + + + --http-proxy + URL for HTTP CONNECT proxy + + Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb. + + Supports the format http://user:pass@host:port, http://host:port, http://host. + + Example: + + http://myUser:myPass@proxyhostname.example.com:8000 + + + --no-check-upload + Don't check the upload is OK + + Normally rclone will try to check the upload exists after it has + uploaded a file to make sure the size and modification time are as + expected. + + This flag stops rclone doing these checks. This enables uploading to + folders which are write only. + + You will likely need to use the --inplace flag also if uploading to + a write only folder. + --encoding The encoding for the backend. @@ -139,22 +173,25 @@ OPTIONS: Advanced - --ask-password Allow asking for FTP password when needed. (default: false) [$ASK_PASSWORD] - --close-timeout value Maximum time to wait for a response to close. (default: "1m0s") [$CLOSE_TIMEOUT] - --concurrency value Maximum number of FTP simultaneous connections, 0 for unlimited. 
(default: 0) [$CONCURRENCY] - --description value Description of the remote. [$DESCRIPTION] - --disable-epsv Disable using EPSV even if server advertises support. (default: false) [$DISABLE_EPSV] - --disable-mlsd Disable using MLSD even if server advertises support. (default: false) [$DISABLE_MLSD] - --disable-tls13 Disable TLS 1.3 (workaround for FTP servers with buggy TLS) (default: false) [$DISABLE_TLS13] - --disable-utf8 Disable using UTF-8 even if server advertises support. (default: false) [$DISABLE_UTF8] - --encoding value The encoding for the backend. (default: "Slash,Del,Ctl,RightSpace,Dot") [$ENCODING] - --force-list-hidden Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD. (default: false) [$FORCE_LIST_HIDDEN] - --idle-timeout value Max time before closing idle connections. (default: "1m0s") [$IDLE_TIMEOUT] - --no-check-certificate Do not verify the TLS certificate of the server. (default: false) [$NO_CHECK_CERTIFICATE] - --shut-timeout value Maximum time to wait for data connection closing status. (default: "1m0s") [$SHUT_TIMEOUT] - --socks-proxy value Socks 5 proxy host. [$SOCKS_PROXY] - --tls-cache-size value Size of TLS session cache for all control and data connections. (default: 32) [$TLS_CACHE_SIZE] - --writing-mdtm Use MDTM to set modification time (VsFtpd quirk) (default: false) [$WRITING_MDTM] + --allow-insecure-tls-ciphers Allow insecure TLS ciphers (default: false) [$ALLOW_INSECURE_TLS_CIPHERS] + --ask-password Allow asking for FTP password when needed. (default: false) [$ASK_PASSWORD] + --close-timeout value Maximum time to wait for a response to close. (default: "1m0s") [$CLOSE_TIMEOUT] + --concurrency value Maximum number of FTP simultaneous connections, 0 for unlimited. (default: 0) [$CONCURRENCY] + --description value Description of the remote. [$DESCRIPTION] + --disable-epsv Disable using EPSV even if server advertises support. 
(default: false) [$DISABLE_EPSV] + --disable-mlsd Disable using MLSD even if server advertises support. (default: false) [$DISABLE_MLSD] + --disable-tls13 Disable TLS 1.3 (workaround for FTP servers with buggy TLS) (default: false) [$DISABLE_TLS13] + --disable-utf8 Disable using UTF-8 even if server advertises support. (default: false) [$DISABLE_UTF8] + --encoding value The encoding for the backend. (default: "Slash,Del,Ctl,RightSpace,Dot") [$ENCODING] + --force-list-hidden Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD. (default: false) [$FORCE_LIST_HIDDEN] + --http-proxy value URL for HTTP CONNECT proxy [$HTTP_PROXY] + --idle-timeout value Max time before closing idle connections. (default: "1m0s") [$IDLE_TIMEOUT] + --no-check-certificate Do not verify the TLS certificate of the server. (default: false) [$NO_CHECK_CERTIFICATE] + --no-check-upload Don't check the upload is OK (default: false) [$NO_CHECK_UPLOAD] + --shut-timeout value Maximum time to wait for data connection closing status. (default: "1m0s") [$SHUT_TIMEOUT] + --socks-proxy value Socks 5 proxy host. [$SOCKS_PROXY] + --tls-cache-size value Size of TLS session cache for all control and data connections. (default: 32) [$TLS_CACHE_SIZE] + --writing-mdtm Use MDTM to set modification time (VsFtpd quirk) (default: false) [$WRITING_MDTM] Client Config diff --git a/docs/en/cli-reference/storage/update/gcs.md b/docs/en/cli-reference/storage/update/gcs.md index 311f27b35..c68094dc0 100644 --- a/docs/en/cli-reference/storage/update/gcs.md +++ b/docs/en/cli-reference/storage/update/gcs.md @@ -32,6 +32,13 @@ DESCRIPTION: Leave blank to use the provider defaults. + --client-credentials + Use client credentials OAuth flow. + + This will use the OAUTH2 client Credentials Flow as described in RFC 6749. + + Note that this option is NOT supported by all backends. + --project-number Project number. @@ -56,6 +63,12 @@ DESCRIPTION: Leave blank normally. 
Needed only if you want use SA instead of interactive login. + --access-token + Short-lived access token. + + Leave blank normally. + Needed only if you want to use short-lived access token instead of interactive login. + --anonymous Access public buckets and objects without credentials. @@ -136,6 +149,7 @@ DESCRIPTION: | us-central1 | Iowa | us-east1 | South Carolina | us-east4 | Northern Virginia + | us-east5 | Ohio | us-west1 | Oregon | us-west2 | California | us-west3 | Salt Lake City @@ -186,9 +200,19 @@ DESCRIPTION: --endpoint - Endpoint for the service. + Custom endpoint for the storage API. Leave blank to use the provider default. - Leave blank normally. + When using a custom endpoint that includes a subpath (e.g. example.org/custom/endpoint), + the subpath will be ignored during upload operations due to a limitation in the + underlying Google API Go client library. + Download and listing operations will work correctly with the full endpoint path. + If you require subpath support for uploads, avoid using subpaths in your custom + endpoint configuration. + + Examples: + | storage.example.org | Specify a custom endpoint + | storage.example.org:4443 | Specifying a custom endpoint with port + | storage.example.org:4443/gcs/api | Specifying a subpath, see the note, uploads won't use the custom path! --encoding The encoding for the backend. @@ -226,15 +250,17 @@ OPTIONS: Advanced - --auth-url value Auth server URL. [$AUTH_URL] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --description value Description of the remote. [$DESCRIPTION] - --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] - --encoding value The encoding for the backend. (default: "Slash,CrLf,InvalidUtf8,Dot") [$ENCODING] - --endpoint value Endpoint for the service. [$ENDPOINT] - --no-check-bucket If set, don't attempt to check the bucket exists or create it.
(default: false) [$NO_CHECK_BUCKET] - --token value OAuth Access Token as a JSON blob. [$TOKEN] - --token-url value Token server url. [$TOKEN_URL] + --access-token value Short-lived access token. [$ACCESS_TOKEN] + --auth-url value Auth server URL. [$AUTH_URL] + --client-credentials Use client credentials OAuth flow. (default: false) [$CLIENT_CREDENTIALS] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --encoding value The encoding for the backend. (default: "Slash,CrLf,InvalidUtf8,Dot") [$ENCODING] + --endpoint value Custom endpoint for the storage API. Leave blank to use the provider default. [$ENDPOINT] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --token value OAuth Access Token as a JSON blob. [$TOKEN] + --token-url value Token server url. [$TOKEN_URL] Client Config diff --git a/docs/en/cli-reference/storage/update/gphotos.md b/docs/en/cli-reference/storage/update/gphotos.md index 5169a5d7a..eb191e62e 100644 --- a/docs/en/cli-reference/storage/update/gphotos.md +++ b/docs/en/cli-reference/storage/update/gphotos.md @@ -32,6 +32,13 @@ DESCRIPTION: Leave blank to use the provider defaults. + --client-credentials + Use client credentials OAuth flow. + + This will use the OAUTH2 client Credentials Flow as described in RFC 6749. + + Note that this option is NOT supported by all backends. + --read-only Set to make the Google Photos backend read only. @@ -65,6 +72,32 @@ DESCRIPTION: Without this flag, archived media will not be visible in directory listings and won't be transferred. 
+ --proxy + Use the gphotosdl proxy for downloading the full resolution images + + The Google API will deliver images and video which aren't full + resolution, and/or have EXIF data missing. + + However if you use the gphotosdl proxy then you can download original, + unchanged images. + + This runs a headless browser in the background. + + Download the software from [gphotosdl](https://github.com/rclone/gphotosdl) + + First run with + + gphotosdl -login + + Then once you have logged into google photos close the browser window + and run + + gphotosdl + + Then supply the parameter `--gphotos-proxy "http://localhost:8282"` to make + rclone use the proxy. + + --encoding The encoding for the backend. @@ -120,7 +153,7 @@ DESCRIPTION: --batch-commit-timeout - Max time to wait for a batch to finish committing + Max time to wait for a batch to finish committing. (no longer used) --description Description of the remote. @@ -135,13 +168,15 @@ OPTIONS: Advanced --auth-url value Auth server URL. [$AUTH_URL] - --batch-commit-timeout value Max time to wait for a batch to finish committing (default: "10m0s") [$BATCH_COMMIT_TIMEOUT] + --batch-commit-timeout value Max time to wait for a batch to finish committing. (no longer used) (default: "10m0s") [$BATCH_COMMIT_TIMEOUT] --batch-mode value Upload file batching sync|async|off. (default: "sync") [$BATCH_MODE] --batch-size value Max number of files in upload batch. (default: 0) [$BATCH_SIZE] --batch-timeout value Max time to allow an idle upload batch before uploading. (default: "0s") [$BATCH_TIMEOUT] + --client-credentials Use client credentials OAuth flow. (default: false) [$CLIENT_CREDENTIALS] --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,CrLf,InvalidUtf8,Dot") [$ENCODING] --include-archived Also view and download archived media. 
(default: false) [$INCLUDE_ARCHIVED] + --proxy value Use the gphotosdl proxy for downloading the full resolution images [$PROXY] --read-size Set to read the size of media items. (default: false) [$READ_SIZE] --start-year value Year limits the photos to be downloaded to those which are uploaded after the given year. (default: 2000) [$START_YEAR] --token value OAuth Access Token as a JSON blob. [$TOKEN] diff --git a/docs/en/cli-reference/storage/update/hidrive.md b/docs/en/cli-reference/storage/update/hidrive.md index 310805666..ad4f9eeec 100644 --- a/docs/en/cli-reference/storage/update/hidrive.md +++ b/docs/en/cli-reference/storage/update/hidrive.md @@ -32,6 +32,13 @@ DESCRIPTION: Leave blank to use the provider defaults. + --client-credentials + Use client credentials OAuth flow. + + This will use the OAUTH2 client Credentials Flow as described in RFC 6749. + + Note that this option is NOT supported by all backends. + --scope-access Access permissions that rclone should use when requesting access from HiDrive. @@ -121,6 +128,7 @@ OPTIONS: --auth-url value Auth server URL. [$AUTH_URL] --chunk-size value Chunksize for chunked uploads. (default: "48Mi") [$CHUNK_SIZE] + --client-credentials Use client credentials OAuth flow. (default: false) [$CLIENT_CREDENTIALS] --description value Description of the remote. [$DESCRIPTION] --disable-fetching-member-count Do not fetch number of objects in directories unless it is absolutely necessary. (default: false) [$DISABLE_FETCHING_MEMBER_COUNT] --encoding value The encoding for the backend. (default: "Slash,Dot") [$ENCODING] diff --git a/docs/en/cli-reference/storage/update/internetarchive.md b/docs/en/cli-reference/storage/update/internetarchive.md index 88199e1f8..63cac0724 100644 --- a/docs/en/cli-reference/storage/update/internetarchive.md +++ b/docs/en/cli-reference/storage/update/internetarchive.md @@ -30,6 +30,15 @@ DESCRIPTION: Leave blank for default value. 
+ --item-metadata + Metadata to be set on the IA item, this is different from file-level metadata that can be set using --metadata-set. + Format is key=value and the 'x-archive-meta-' prefix is automatically added. + + --item-derive + Whether to trigger derive on the IA item or not. If set to false, the item will not be derived by IA upon upload. + The derive process produces a number of secondary files from an upload to make an upload more usable on the web. + Setting this to false is useful for uploading files that are already in a format that IA can display or reduce burden on IA's infrastructure. + --disable-checksum Don't ask the server to test against MD5 checksum calculated by rclone. Normally rclone will calculate the MD5 checksum of the input before @@ -54,6 +63,7 @@ DESCRIPTION: OPTIONS: --access-key-id value IAS3 Access Key. [$ACCESS_KEY_ID] --help, -h show help + --item-derive Whether to trigger derive on the IA item or not. If set to false, the item will not be derived by IA upon upload. (default: true) [$ITEM_DERIVE] --secret-access-key value IAS3 Secret Key (password). [$SECRET_ACCESS_KEY] Advanced @@ -63,6 +73,7 @@ OPTIONS: --encoding value The encoding for the backend. (default: "Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] --endpoint value IAS3 Endpoint. (default: "https://s3.us.archive.org") [$ENDPOINT] --front-endpoint value Host of InternetArchive Frontend. (default: "https://archive.org") [$FRONT_ENDPOINT] + --item-metadata value Metadata to be set on the IA item, this is different from file-level metadata that can be set using --metadata-set. [$ITEM_METADATA] --wait-archive value Timeout for waiting the server's processing tasks (specifically archive and book_op) to finish. 
(default: "0s") [$WAIT_ARCHIVE] Client Config diff --git a/docs/en/cli-reference/storage/update/jottacloud.md b/docs/en/cli-reference/storage/update/jottacloud.md index 7c2814501..73afabe7f 100644 --- a/docs/en/cli-reference/storage/update/jottacloud.md +++ b/docs/en/cli-reference/storage/update/jottacloud.md @@ -32,6 +32,13 @@ DESCRIPTION: Leave blank to use the provider defaults. + --client-credentials + Use client credentials OAuth flow. + + This will use the OAUTH2 client Credentials Flow as described in RFC 6749. + + Note that this option is NOT supported by all backends. + --md5-memory-limit Files bigger than this will be cached on disk to calculate the MD5 if required. @@ -66,6 +73,7 @@ OPTIONS: Advanced --auth-url value Auth server URL. [$AUTH_URL] + --client-credentials Use client credentials OAuth flow. (default: false) [$CLIENT_CREDENTIALS] --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] --hard-delete Delete files permanently rather than putting them into the trash. (default: false) [$HARD_DELETE] diff --git a/docs/en/cli-reference/storage/update/local.md b/docs/en/cli-reference/storage/update/local.md index 7ea63a2cc..a6620f469 100644 --- a/docs/en/cli-reference/storage/update/local.md +++ b/docs/en/cli-reference/storage/update/local.md @@ -19,7 +19,7 @@ DESCRIPTION: Follow symlinks and copy the pointed to item. --links - Translate symlinks to/from regular files with a '.rclonelink' extension. + Translate symlinks to/from regular files with a '.rclonelink' extension for the local backend. --skip-links Don't warn about skipped symlinks. @@ -27,6 +27,13 @@ DESCRIPTION: This flag disables warning messages on skipped symlinks or junction points, as you explicitly acknowledge that they should be skipped. + --skip-specials + Don't warn about skipped pipes, sockets and device objects. 
+ + This flag disables warning messages on skipped pipes, sockets and + device objects, as you explicitly acknowledge that they should be + skipped. + --zero-size-links Assume the Stat size of links is zero (and read them instead) (deprecated). @@ -176,6 +183,9 @@ DESCRIPTION: | btime | The creation time. | ctime | The last status change time. + --hashes + Comma separated list of supported checksum types. + --encoding The encoding for the backend. @@ -195,7 +205,8 @@ OPTIONS: --copy-links, -L Follow symlinks and copy the pointed to item. (default: false) [$COPY_LINKS] --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,Dot") [$ENCODING] - --links, -l Translate symlinks to/from regular files with a '.rclonelink' extension. (default: false) [$LINKS] + --hashes value Comma separated list of supported checksum types. [$HASHES] + --links Translate symlinks to/from regular files with a '.rclonelink' extension for the local backend. (default: false) [$LINKS] --no-check-updated Don't check to see if the files change during upload. (default: false) [$NO_CHECK_UPDATED] --no-clone Disable reflink cloning for server-side copies. (default: false) [$NO_CLONE] --no-preallocate Disable preallocation of disk space for transferred files. (default: false) [$NO_PREALLOCATE] @@ -204,6 +215,7 @@ OPTIONS: --nounc Disable UNC (long path names) conversion on Windows. (default: false) [$NOUNC] --one-file-system, -x Don't cross filesystem boundaries (unix/macOS only). (default: false) [$ONE_FILE_SYSTEM] --skip-links Don't warn about skipped symlinks. (default: false) [$SKIP_LINKS] + --skip-specials Don't warn about skipped pipes, sockets and device objects. (default: false) [$SKIP_SPECIALS] --time-type value Set what kind of time is returned. (default: "mtime") [$TIME_TYPE] --unicode-normalization Apply unicode NFC normalization to paths and filenames. 
(default: false) [$UNICODE_NORMALIZATION] --zero-size-links Assume the Stat size of links is zero (and read them instead) (deprecated). (default: false) [$ZERO_SIZE_LINKS] diff --git a/docs/en/cli-reference/storage/update/mailru.md b/docs/en/cli-reference/storage/update/mailru.md index dfc0c53d0..a79b942a1 100644 --- a/docs/en/cli-reference/storage/update/mailru.md +++ b/docs/en/cli-reference/storage/update/mailru.md @@ -32,6 +32,13 @@ DESCRIPTION: Leave blank to use the provider defaults. + --client-credentials + Use client credentials OAuth flow. + + This will use the OAUTH2 client Credentials Flow as described in RFC 6749. + + Note that this option is NOT supported by all backends. + --user User name (usually email). @@ -130,6 +137,7 @@ OPTIONS: --auth-url value Auth server URL. [$AUTH_URL] --check-hash What should copy do if file checksum is mismatched or invalid. (default: true) [$CHECK_HASH] + --client-credentials Use client credentials OAuth flow. (default: false) [$CLIENT_CREDENTIALS] --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] --quirks value Comma separated list of internal maintenance flags. [$QUIRKS] diff --git a/docs/en/cli-reference/storage/update/mega.md b/docs/en/cli-reference/storage/update/mega.md index 8867c98c9..e7009b6d2 100644 --- a/docs/en/cli-reference/storage/update/mega.md +++ b/docs/en/cli-reference/storage/update/mega.md @@ -15,6 +15,15 @@ DESCRIPTION: --pass Password. + --2fa + The 2FA code of your MEGA account if the account is set up with one + + --session-id + Session (internal use only) + + --master-key + Master key (internal use only) + --debug Output more debug from Mega. @@ -47,6 +56,7 @@ DESCRIPTION: OPTIONS: + --2fa value The 2FA code of your MEGA account if the account is set up with one [$2FA] --help, -h show help --pass value Password. 
[$PASS] --user value User name. [$USER] @@ -57,6 +67,8 @@ OPTIONS: --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] --hard-delete Delete files permanently rather than putting them into the trash. (default: false) [$HARD_DELETE] + --master-key value Master key (internal use only) [$MASTER_KEY] + --session-id value Session (internal use only) [$SESSION_ID] --use-https Use HTTPS for transfers. (default: false) [$USE_HTTPS] Client Config diff --git a/docs/en/cli-reference/storage/update/onedrive.md b/docs/en/cli-reference/storage/update/onedrive.md index 180bd1381..ca65bce75 100644 --- a/docs/en/cli-reference/storage/update/onedrive.md +++ b/docs/en/cli-reference/storage/update/onedrive.md @@ -32,15 +32,35 @@ DESCRIPTION: Leave blank to use the provider defaults. + --client-credentials + Use client credentials OAuth flow. + + This will use the OAUTH2 client Credentials Flow as described in RFC 6749. + + Note that this option is NOT supported by all backends. + --region Choose national cloud region for OneDrive. Examples: | global | Microsoft Cloud Global | us | Microsoft Cloud for US Government - | de | Microsoft Cloud Germany + | de | Microsoft Cloud Germany (deprecated - try global region first). | cn | Azure and Office 365 operated by Vnet Group in China + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + + This is disabled by default as uploading using single part uploads + causes rclone to use twice the storage on Onedrive business as when + rclone sets the modification time after the upload Onedrive creates a + new version. + + See: https://github.com/rclone/rclone/issues/1716 + + --chunk-size Chunk size to upload files with - must be multiple of 320k (327,680 bytes). 
@@ -74,6 +94,13 @@ DESCRIPTION: | Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All offline_access | Read and write access to all resources, without the ability to browse SharePoint sites. | | Same as if disable_site_permission was set to true + --tenant + ID of the service principal's tenant. Also called its directory ID. + + Set this if using + - Client Credential flow + + --disable-site-permission Disable the request for Sites.Read.All permission. @@ -221,7 +248,7 @@ DESCRIPTION: As a rule of thumb if nearly all of your data is under rclone's root directory (the `root/directory` in `onedrive:root/directory`) then - using this flag will be be a big performance win. If your data is + using this flag will be a big performance win. If your data is mostly not under the root then using this flag will be a big performance loss. @@ -257,6 +284,7 @@ OPTIONS: --client-secret value OAuth Client Secret. [$CLIENT_SECRET] --help, -h show help --region value Choose national cloud region for OneDrive. (default: "global") [$REGION] + --tenant value ID of the service principal's tenant. Also called its directory ID. [$TENANT] Advanced @@ -264,6 +292,7 @@ OPTIONS: --auth-url value Auth server URL. [$AUTH_URL] --av-override Allows download of files the server thinks has a virus. (default: false) [$AV_OVERRIDE] --chunk-size value Chunk size to upload files with - must be multiple of 320k (327,680 bytes). (default: "10Mi") [$CHUNK_SIZE] + --client-credentials Use client credentials OAuth flow. (default: false) [$CLIENT_CREDENTIALS] --delta If set rclone will use delta listing to implement recursive listings. (default: false) [$DELTA] --description value Description of the remote. [$DESCRIPTION] --disable-site-permission Disable the request for Sites.Read.All permission. (default: false) [$DISABLE_SITE_PERMISSION] @@ -283,6 +312,7 @@ OPTIONS: --server-side-across-configs Deprecated: use --server-side-across-configs instead. 
(default: false) [$SERVER_SIDE_ACROSS_CONFIGS] --token value OAuth Access Token as a JSON blob. [$TOKEN] --token-url value Token server url. [$TOKEN_URL] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "off") [$UPLOAD_CUTOFF] Client Config diff --git a/docs/en/cli-reference/storage/update/oos/env_auth.md b/docs/en/cli-reference/storage/update/oos/env_auth.md index 5f08c9ead..a05ed83ab 100644 --- a/docs/en/cli-reference/storage/update/oos/env_auth.md +++ b/docs/en/cli-reference/storage/update/oos/env_auth.md @@ -13,7 +13,9 @@ DESCRIPTION: Object storage namespace --compartment - Object storage compartment OCID + Specify compartment OCID, if you need to list buckets. + + List objects works without compartment OCID. --region Object storage Region @@ -188,7 +190,7 @@ DESCRIPTION: OPTIONS: - --compartment value Object storage compartment OCID [$COMPARTMENT] + --compartment value Specify compartment OCID, if you need to list buckets. [$COMPARTMENT] --endpoint value Endpoint for Object storage API. [$ENDPOINT] --help, -h show help --namespace value Object storage namespace [$NAMESPACE] diff --git a/docs/en/cli-reference/storage/update/oos/instance_principal_auth.md b/docs/en/cli-reference/storage/update/oos/instance_principal_auth.md index 94f1d25dc..a8e4e37cf 100644 --- a/docs/en/cli-reference/storage/update/oos/instance_principal_auth.md +++ b/docs/en/cli-reference/storage/update/oos/instance_principal_auth.md @@ -17,7 +17,9 @@ DESCRIPTION: Object storage namespace --compartment - Object storage compartment OCID + Specify compartment OCID, if you need to list buckets. + + List objects works without compartment OCID. --region Object storage Region @@ -192,7 +194,7 @@ DESCRIPTION: OPTIONS: - --compartment value Object storage compartment OCID [$COMPARTMENT] + --compartment value Specify compartment OCID, if you need to list buckets. [$COMPARTMENT] --endpoint value Endpoint for Object storage API. 
[$ENDPOINT] --help, -h show help --namespace value Object storage namespace [$NAMESPACE] diff --git a/docs/en/cli-reference/storage/update/oos/resource_principal_auth.md b/docs/en/cli-reference/storage/update/oos/resource_principal_auth.md index 25c9f4ec1..348f19dd8 100644 --- a/docs/en/cli-reference/storage/update/oos/resource_principal_auth.md +++ b/docs/en/cli-reference/storage/update/oos/resource_principal_auth.md @@ -13,7 +13,9 @@ DESCRIPTION: Object storage namespace --compartment - Object storage compartment OCID + Specify compartment OCID, if you need to list buckets. + + List objects works without compartment OCID. --region Object storage Region @@ -188,7 +190,7 @@ DESCRIPTION: OPTIONS: - --compartment value Object storage compartment OCID [$COMPARTMENT] + --compartment value Specify compartment OCID, if you need to list buckets. [$COMPARTMENT] --endpoint value Endpoint for Object storage API. [$ENDPOINT] --help, -h show help --namespace value Object storage namespace [$NAMESPACE] diff --git a/docs/en/cli-reference/storage/update/oos/user_principal_auth.md b/docs/en/cli-reference/storage/update/oos/user_principal_auth.md index 7718a195b..ee16f2a24 100644 --- a/docs/en/cli-reference/storage/update/oos/user_principal_auth.md +++ b/docs/en/cli-reference/storage/update/oos/user_principal_auth.md @@ -17,7 +17,9 @@ DESCRIPTION: Object storage namespace --compartment - Object storage compartment OCID + Specify compartment OCID, if you need to list buckets. + + List objects works without compartment OCID. --region Object storage Region @@ -204,7 +206,7 @@ DESCRIPTION: OPTIONS: - --compartment value Object storage compartment OCID [$COMPARTMENT] + --compartment value Specify compartment OCID, if you need to list buckets. 
[$COMPARTMENT] --config-file value Path to OCI config file (default: "~/.oci/config") [$CONFIG_FILE] --config-profile value Profile name inside the oci config file (default: "Default") [$CONFIG_PROFILE] --endpoint value Endpoint for Object storage API. [$ENDPOINT] diff --git a/docs/en/cli-reference/storage/update/oos/workload_identity_auth.md b/docs/en/cli-reference/storage/update/oos/workload_identity_auth.md index ce702dadc..243399c86 100644 --- a/docs/en/cli-reference/storage/update/oos/workload_identity_auth.md +++ b/docs/en/cli-reference/storage/update/oos/workload_identity_auth.md @@ -15,7 +15,9 @@ DESCRIPTION: Object storage namespace --compartment - Object storage compartment OCID + Specify compartment OCID, if you need to list buckets. + + List objects works without compartment OCID. --region Object storage Region @@ -190,7 +192,7 @@ DESCRIPTION: OPTIONS: - --compartment value Object storage compartment OCID [$COMPARTMENT] + --compartment value Specify compartment OCID, if you need to list buckets. [$COMPARTMENT] --endpoint value Endpoint for Object storage API. [$ENDPOINT] --help, -h show help --namespace value Object storage namespace [$NAMESPACE] diff --git a/docs/en/cli-reference/storage/update/opendrive.md b/docs/en/cli-reference/storage/update/opendrive.md index 0344ccbe1..0c4dd7806 100644 --- a/docs/en/cli-reference/storage/update/opendrive.md +++ b/docs/en/cli-reference/storage/update/opendrive.md @@ -26,6 +26,14 @@ DESCRIPTION: Note that these chunks are buffered in memory so increasing them will increase memory use. + --access + Files and folders will be uploaded with this access permission (default private) + + Examples: + | private | The file or folder access can be granted in a way that will allow select users to view, read or write what is absolutely essential for them. + | public | The file or folder can be downloaded by anyone from a web browser. 
The link can be shared in any way, + | hidden | The file or folder can be accessed has the same restrictions as Public if the user knows the URL of the file or folder link in order to access the contents + --description Description of the remote. @@ -37,6 +45,7 @@ OPTIONS: Advanced + --access value Files and folders will be uploaded with this access permission (default private) (default: "private") [$ACCESS] --chunk-size value Files will be uploaded in chunks this size. (default: "10Mi") [$CHUNK_SIZE] --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot") [$ENCODING] diff --git a/docs/en/cli-reference/storage/update/pcloud.md b/docs/en/cli-reference/storage/update/pcloud.md index a6620394a..e9d8ac66f 100644 --- a/docs/en/cli-reference/storage/update/pcloud.md +++ b/docs/en/cli-reference/storage/update/pcloud.md @@ -32,6 +32,13 @@ DESCRIPTION: Leave blank to use the provider defaults. + --client-credentials + Use client credentials OAuth flow. + + This will use the OAUTH2 client Credentials Flow as described in RFC 6749. + + Note that this option is NOT supported by all backends. + --encoding The encoding for the backend. @@ -74,6 +81,7 @@ OPTIONS: Advanced --auth-url value Auth server URL. [$AUTH_URL] + --client-credentials Use client credentials OAuth flow. (default: false) [$CLIENT_CREDENTIALS] --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] --hostname value Hostname to connect to. 
(default: "api.pcloud.com") [$HOSTNAME] diff --git a/docs/en/cli-reference/storage/update/premiumizeme.md b/docs/en/cli-reference/storage/update/premiumizeme.md index ba49fb4c8..540752b81 100644 --- a/docs/en/cli-reference/storage/update/premiumizeme.md +++ b/docs/en/cli-reference/storage/update/premiumizeme.md @@ -32,6 +32,13 @@ DESCRIPTION: Leave blank to use the provider defaults. + --client-credentials + Use client credentials OAuth flow. + + This will use the OAUTH2 client Credentials Flow as described in RFC 6749. + + Note that this option is NOT supported by all backends. + --api-key API Key. @@ -55,11 +62,12 @@ OPTIONS: Advanced - --auth-url value Auth server URL. [$AUTH_URL] - --description value Description of the remote. [$DESCRIPTION] - --encoding value The encoding for the backend. (default: "Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] - --token value OAuth Access Token as a JSON blob. [$TOKEN] - --token-url value Token server url. [$TOKEN_URL] + --auth-url value Auth server URL. [$AUTH_URL] + --client-credentials Use client credentials OAuth flow. (default: false) [$CLIENT_CREDENTIALS] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] + --token value OAuth Access Token as a JSON blob. [$TOKEN] + --token-url value Token server url. [$TOKEN_URL] Client Config diff --git a/docs/en/cli-reference/storage/update/putio.md b/docs/en/cli-reference/storage/update/putio.md index 9e2b9a2d9..7e35580b4 100644 --- a/docs/en/cli-reference/storage/update/putio.md +++ b/docs/en/cli-reference/storage/update/putio.md @@ -32,6 +32,13 @@ DESCRIPTION: Leave blank to use the provider defaults. + --client-credentials + Use client credentials OAuth flow. + + This will use the OAUTH2 client Credentials Flow as described in RFC 6749. + + Note that this option is NOT supported by all backends. 
+ --encoding The encoding for the backend. @@ -48,11 +55,12 @@ OPTIONS: Advanced - --auth-url value Auth server URL. [$AUTH_URL] - --description value Description of the remote. [$DESCRIPTION] - --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] - --token value OAuth Access Token as a JSON blob. [$TOKEN] - --token-url value Token server url. [$TOKEN_URL] + --auth-url value Auth server URL. [$AUTH_URL] + --client-credentials Use client credentials OAuth flow. (default: false) [$CLIENT_CREDENTIALS] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] + --token value OAuth Access Token as a JSON blob. [$TOKEN] + --token-url value Token server url. [$TOKEN_URL] Client Config diff --git a/docs/en/cli-reference/storage/update/s3/README.md b/docs/en/cli-reference/storage/update/s3/README.md index 78f83e923..6899e4b6b 100644 --- a/docs/en/cli-reference/storage/update/s3/README.md +++ b/docs/en/cli-reference/storage/update/s3/README.md @@ -1,9 +1,9 @@ -# Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, ChinaMobile, Cloudflare, DigitalOcean, Dreamhost, GCS, HuaweiOBS, IBMCOS, IDrive, IONOS, LyveCloud, Leviia, Liara, Linode, Magalu, Minio, Netease, Petabox, RackCorp, Rclone, Scaleway, SeaweedFS, StackPath, Storj, Synology, TencentCOS, Wasabi, Qiniu and others +# Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, BizflyCloud, Ceph, ChinaMobile, Cloudflare, Cubbit, DigitalOcean, Dreamhost, Exaba, FileLu, FlashBlade, GCS, Hetzner, HuaweiOBS, IBMCOS, IDrive, Intercolo, IONOS, Leviia, Liara, Linode, LyveCloud, Magalu, Mega, Minio, Netease, Outscale, OVHcloud, Petabox, Qiniu, Rabata, RackCorp, Rclone, Scaleway, SeaweedFS, Selectel, Servercore, SpectraLogic, StackPath, Storj, Synology, TencentCOS, Wasabi, Zata, Other {% code fullWidth="true" %} 
``` NAME: - singularity storage update s3 - Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, ChinaMobile, Cloudflare, DigitalOcean, Dreamhost, GCS, HuaweiOBS, IBMCOS, IDrive, IONOS, LyveCloud, Leviia, Liara, Linode, Magalu, Minio, Netease, Petabox, RackCorp, Rclone, Scaleway, SeaweedFS, StackPath, Storj, Synology, TencentCOS, Wasabi, Qiniu and others + singularity storage update s3 - Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, BizflyCloud, Ceph, ChinaMobile, Cloudflare, Cubbit, DigitalOcean, Dreamhost, Exaba, FileLu, FlashBlade, GCS, Hetzner, HuaweiOBS, IBMCOS, IDrive, Intercolo, IONOS, Leviia, Liara, Linode, LyveCloud, Magalu, Mega, Minio, Netease, Outscale, OVHcloud, Petabox, Qiniu, Rabata, RackCorp, Rclone, Scaleway, SeaweedFS, Selectel, Servercore, SpectraLogic, StackPath, Storj, Synology, TencentCOS, Wasabi, Zata, Other USAGE: singularity storage update s3 command [command options] @@ -12,35 +12,50 @@ COMMANDS: aws Amazon Web Services (AWS) S3 alibaba Alibaba Cloud Object Storage System (OSS) formerly Aliyun arvancloud Arvan Cloud Object Storage (AOS) + bizflycloud Bizfly Cloud Simple Storage ceph Ceph Object Storage chinamobile China Mobile Ecloud Elastic Object Storage (EOS) cloudflare Cloudflare R2 Storage + cubbit Cubbit DS3 Object Storage digitalocean DigitalOcean Spaces dreamhost Dreamhost DreamObjects + exaba Exaba Object Storage + filelu FileLu S5 (S3-Compatible Object Storage) + flashblade Pure Storage FlashBlade Object Storage gcs Google Cloud Storage + hetzner Hetzner Object Storage huaweiobs Huawei Object Storage Service ibmcos IBM COS S3 idrive IDrive e2 ionos IONOS Cloud + intercolo Intercolo Object Storage leviia Leviia Object Storage liara Liara Object Storage linode Linode Object Storage lyvecloud Seagate Lyve Cloud magalu Magalu Object Storage + mega MEGA S4 Object Storage minio Minio Object Storage netease Netease Object Storage (NOS) + ovhcloud OVHcloud Object Storage other Any 
other S3 compatible provider + outscale OUTSCALE Object Storage (OOS) petabox Petabox Object Storage qiniu Qiniu Object Storage (Kodo) + rabata Rabata Cloud Storage rackcorp RackCorp Object Storage rclone Rclone S3 Server scaleway Scaleway Object Storage seaweedfs SeaweedFS S3 + selectel Selectel Object Storage + servercore Servercore Object Storage + spectralogic Spectra Logic Black Pearl stackpath StackPath Object Storage storj Storj (S3 Compatible Gateway) synology Synology C2 Object Storage tencentcos Tencent Cloud Object Storage (COS) wasabi Wasabi Object Storage + zata Zata (S3 compatible Gateway) help, h Shows a list of commands or help for one command OPTIONS: diff --git a/docs/en/cli-reference/storage/update/s3/alibaba.md b/docs/en/cli-reference/storage/update/s3/alibaba.md index bfae28c5f..d8ba70966 100644 --- a/docs/en/cli-reference/storage/update/s3/alibaba.md +++ b/docs/en/cli-reference/storage/update/s3/alibaba.md @@ -29,7 +29,9 @@ DESCRIPTION: Leave blank for anonymous access or runtime credentials. --endpoint - Endpoint for OSS API. + Endpoint for S3 API. + + Required when using an S3 clone. Examples: | oss-accelerate.aliyuncs.com | Global Accelerate @@ -96,13 +98,7 @@ DESCRIPTION: | | The AuthenticatedUsers group gets READ access. --storage-class - The storage class to use when storing new objects in OSS. - - Examples: - | | Default - | STANDARD | Standard storage class - | GLACIER | Archive storage mode - | STANDARD_IA | Infrequent access storage mode + The storage class to use when storing new objects in S3. --upload-cutoff Cutoff for switching to chunked upload. @@ -195,6 +191,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. 
+ + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -234,6 +250,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -374,6 +393,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -386,7 +410,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -488,6 +512,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. 
+ + --sdk-log-mode Set to debug the SDK @@ -514,11 +562,11 @@ DESCRIPTION: OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] - --endpoint value Endpoint for OSS API. [$ENDPOINT] + --endpoint value Endpoint for S3 API. [$ENDPOINT] --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] --help, -h show help --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] - --storage-class value The storage class to use when storing new objects in OSS. [$STORAGE_CLASS] + --storage-class value The storage class to use when storing new objects in S3. [$STORAGE_CLASS] Advanced @@ -545,18 +593,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. 
(default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/update/s3/arvancloud.md b/docs/en/cli-reference/storage/update/s3/arvancloud.md index 48da5f740..7e0650fd9 100644 --- a/docs/en/cli-reference/storage/update/s3/arvancloud.md +++ b/docs/en/cli-reference/storage/update/s3/arvancloud.md @@ -29,7 +29,9 @@ DESCRIPTION: Leave blank for anonymous access or runtime credentials. --endpoint - Endpoint for Arvan Cloud Object Storage (AOS) API. 
+ Endpoint for S3 API. + + Required when using an S3 clone. Examples: | s3.ir-thr-at1.arvanstorage.ir | The default endpoint - a good choice if you are unsure. @@ -37,9 +39,9 @@ DESCRIPTION: | s3.ir-tbz-sh1.arvanstorage.ir | Tabriz Iran (Shahriar) --location-constraint - Location constraint - must match endpoint. + Location constraint - must be set to match the Region. - Used when creating buckets only. + Leave blank if not sure. Used when creating buckets only. Examples: | ir-thr-at1 | Tehran Iran (Simin) @@ -83,10 +85,7 @@ DESCRIPTION: | | The AuthenticatedUsers group gets READ access. --storage-class - The storage class to use when storing new objects in ArvanCloud. - - Examples: - | STANDARD | Standard storage class + The storage class to use when storing new objects in S3. --upload-cutoff Cutoff for switching to chunked upload. @@ -179,6 +178,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -218,6 +237,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -358,6 +380,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. 
+ + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -370,7 +397,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -472,6 +499,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -498,12 +549,12 @@ DESCRIPTION: OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] - --endpoint value Endpoint for Arvan Cloud Object Storage (AOS) API. [$ENDPOINT] + --endpoint value Endpoint for S3 API. [$ENDPOINT] --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] --help, -h show help - --location-constraint value Location constraint - must match endpoint. [$LOCATION_CONSTRAINT] + --location-constraint value Location constraint - must be set to match the Region. 
[$LOCATION_CONSTRAINT] --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] - --storage-class value The storage class to use when storing new objects in ArvanCloud. [$STORAGE_CLASS] + --storage-class value The storage class to use when storing new objects in S3. [$STORAGE_CLASS] Advanced @@ -530,18 +581,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. 
(default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/update/s3/aws.md b/docs/en/cli-reference/storage/update/s3/aws.md index af0af55bd..07b9595db 100644 --- a/docs/en/cli-reference/storage/update/s3/aws.md +++ b/docs/en/cli-reference/storage/update/s3/aws.md @@ -30,6 +30,8 @@ DESCRIPTION: --region Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. Examples: | us-east-1 | The default endpoint - a good choice if you are unsure. @@ -89,12 +91,12 @@ DESCRIPTION: --endpoint Endpoint for S3 API. - Leave blank if using AWS to use the default endpoint for the region. + Required when using an S3 clone. --location-constraint Location constraint - must be set to match the Region. - Used when creating buckets only. + Leave blank if not sure. Used when creating buckets only. 
Examples: | | Empty for US Region, Northern Virginia, or Pacific Northwest @@ -167,10 +169,6 @@ DESCRIPTION: --server-side-encryption The server-side encryption algorithm used when storing this object in S3. - Examples: - | | None - | AES256 | AES256 - --sse-customer-algorithm If using SSE-C, the server-side encryption algorithm used when storing this object in S3. @@ -214,15 +212,12 @@ DESCRIPTION: The storage class to use when storing new objects in S3. Examples: - | | Default - | STANDARD | Standard storage class | REDUCED_REDUNDANCY | Reduced redundancy storage class | STANDARD_IA | Standard Infrequent Access storage class | ONEZONE_IA | One Zone Infrequent Access storage class - | GLACIER | Glacier storage class + | GLACIER | Glacier Flexible Retrieval storage class | DEEP_ARCHIVE | Glacier Deep Archive storage class | INTELLIGENT_TIERING | Intelligent-Tiering storage class - | GLACIER_IR | Glacier Instant Retrieval storage class --upload-cutoff Cutoff for switching to chunked upload. @@ -315,6 +310,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -359,6 +374,9 @@ DESCRIPTION: See: [AWS S3 Transfer acceleration](https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration-examples.html) + --use-arn-region + If true, enables arn region support for the service. + --leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. 
@@ -507,6 +525,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -519,7 +542,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -626,6 +649,56 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --directory-bucket + Set to use AWS Directory Buckets + + If you are using an AWS Directory Bucket then set this flag. + + This will ensure no `Content-Md5` headers are sent and ensure `ETag` + headers are not interpreted as MD5 sums. `X-Amz-Meta-Md5chksum` will + be set on all objects whether single or multipart uploaded. + + This also sets `no_check_bucket = true`. 
+ + Note that Directory Buckets do not support: + + - Versioning + - `Content-Encoding: gzip` + + Rclone limitations with Directory Buckets: + + - rclone does not support creating Directory Buckets with `rclone mkdir` + - ... or removing them with `rclone rmdir` yet + - Directory Buckets do not appear when doing `rclone lsf` at the top level. + - Rclone can't remove auto created directories yet. In theory this should + work with `directory_markers = true` but it doesn't. + - Directories don't seem to appear in recursive (ListR) listings. + + --sdk-log-mode Set to debug the SDK @@ -669,6 +742,7 @@ OPTIONS: --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] --description value Description of the remote. [$DESCRIPTION] + --directory-bucket Set to use AWS Directory Buckets (default: false) [$DIRECTORY_BUCKET] --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] @@ -689,9 +763,14 @@ OPTIONS: --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] --requester-pays Enables requester pays option when interacting with S3 bucket. (default: false) [$REQUESTER_PAYS] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. 
[$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --sse-customer-algorithm value If using SSE-C, the server-side encryption algorithm used when storing this object in S3. [$SSE_CUSTOMER_ALGORITHM] --sse-customer-key value To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY] --sse-customer-key-base64 value If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY_BASE64] @@ -702,11 +781,14 @@ OPTIONS: --use-accelerate-endpoint If true use the AWS S3 accelerated endpoint. (default: false) [$USE_ACCELERATE_ENDPOINT] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. 
(default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/update/s3/bizflycloud.md b/docs/en/cli-reference/storage/update/s3/bizflycloud.md new file mode 100644 index 000000000..d371a4079 --- /dev/null +++ b/docs/en/cli-reference/storage/update/s3/bizflycloud.md @@ -0,0 +1,629 @@ +# Bizfly Cloud Simple Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage update s3 bizflycloud - Bizfly Cloud Simple Storage + +USAGE: + singularity storage update s3 bizflycloud [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key are blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --region + Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. + + Examples: + | hn | Ha Noi + | hcm | Ho Chi Minh + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone.
+ + Examples: + | hn.ss.bfcplatform.vn | Hanoi endpoint + | hcm.ss.bfcplatform.vn | Ho Chi Minh endpoint + + --acl + Canned ACL used when creating buckets and storing or copying objects. + + This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when server-side copying objects as S3 + doesn't copy the ACL from the source but rather writes a fresh one. + + If the acl is an empty string then no X-Amz-Acl: header is added and + the default (private) will be used. + + + --bucket-acl + Canned ACL used when creating buckets. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. + | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size.
+ + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. 
+ + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. 
AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --use-arn-region + If true, enables arn region support for the service. + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. 
If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If a source object of unknown length is uploaded then rclone **will** do a + HEAD request. + + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends.
+ + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + + --versions + Include old versions in directory listings. 
+ + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-options) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. 
+ + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --region value Region to connect to. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. 
(default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. 
(default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). 
(default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers. To remove, use empty string. + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth. To remove, use empty string. + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value). This will replace the existing header values. To remove a header, use --client-header "key=". To remove all headers, use --client-header "" + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth. To remove, use empty string. 
+ --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/update/s3/ceph.md b/docs/en/cli-reference/storage/update/s3/ceph.md index e0d535759..beff1cea2 100644 --- a/docs/en/cli-reference/storage/update/s3/ceph.md +++ b/docs/en/cli-reference/storage/update/s3/ceph.md @@ -33,12 +33,6 @@ DESCRIPTION: Leave blank if you are using an S3 clone and you don't have a region. - Examples: - | | Use this if unsure. - | | Will use v4 signatures and an empty region. - | other-v2-signature | Use this only if v4 signatures don't work. - | | E.g. pre Jewel/v10 CEPH. - --endpoint Endpoint for S3 API. @@ -89,10 +83,6 @@ DESCRIPTION: --server-side-encryption The server-side encryption algorithm used when storing this object in S3. - Examples: - | | None - | AES256 | AES256 - --sse-customer-algorithm If using SSE-C, the server-side encryption algorithm used when storing this object in S3. @@ -223,6 +213,26 @@ DESCRIPTION: --session-token An AWS session token. 
+ --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -262,6 +272,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -402,6 +415,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -414,7 +432,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -516,6 +534,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. 
+ + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -576,9 +618,14 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --sse-customer-algorithm value If using SSE-C, the server-side encryption algorithm used when storing this object in S3. [$SSE_CUSTOMER_ALGORITHM] --sse-customer-key value To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY] --sse-customer-key-base64 value If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY_BASE64] @@ -587,11 +634,14 @@ OPTIONS: --upload-cutoff value Cutoff for switching to chunked upload. 
(default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/update/s3/chinamobile.md b/docs/en/cli-reference/storage/update/s3/chinamobile.md index ebe1a5cf5..4251d67c4 100644 --- a/docs/en/cli-reference/storage/update/s3/chinamobile.md +++ b/docs/en/cli-reference/storage/update/s3/chinamobile.md @@ -29,7 +29,9 @@ DESCRIPTION: Leave blank for anonymous access or runtime credentials. --endpoint - Endpoint for China Mobile Ecloud Elastic Object Storage (EOS) API. 
+ Endpoint for S3 API. + + Required when using an S3 clone. Examples: | eos-wuxi-1.cmecloud.cn | The default endpoint - a good choice if you are unsure. @@ -65,9 +67,9 @@ DESCRIPTION: | eos-anhui-1.cmecloud.cn | Anhui China (Huainan) --location-constraint - Location constraint - must match endpoint. + Location constraint - must be set to match the Region. - Used when creating buckets only. + Leave blank if not sure. Used when creating buckets only. Examples: | wuxi1 | East China (Suzhou) @@ -86,7 +88,7 @@ DESCRIPTION: | chengdu1 | Southwest China (Chengdu) | chongqing1 | Southwest China (Chongqing) | guiyang1 | Southwest China (Guiyang) - | xian1 | Nouthwest China (Xian) + | xian1 | Northwest China (Xian) | yunnan | Yunnan China (Kunming) | yunnan2 | Yunnan China (Kunming-2) | tianjin1 | Tianjin China (Tianjin) @@ -141,10 +143,6 @@ DESCRIPTION: --server-side-encryption The server-side encryption algorithm used when storing this object in S3. - Examples: - | | None - | AES256 | AES256 - --sse-customer-algorithm If using SSE-C, the server-side encryption algorithm used when storing this object in S3. @@ -178,13 +176,7 @@ DESCRIPTION: | | None --storage-class - The storage class to use when storing new objects in ChinaMobile. - - Examples: - | | Default - | STANDARD | Standard storage class - | GLACIER | Archive storage mode - | STANDARD_IA | Infrequent access storage mode + The storage class to use when storing new objects in S3. --upload-cutoff Cutoff for switching to chunked upload. @@ -277,6 +269,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. 
+ + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -316,6 +328,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -456,6 +471,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -468,7 +488,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -570,6 +590,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -596,13 +640,13 @@ DESCRIPTION: OPTIONS: --access-key-id value AWS Access Key ID. 
[$ACCESS_KEY_ID] --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] - --endpoint value Endpoint for China Mobile Ecloud Elastic Object Storage (EOS) API. [$ENDPOINT] + --endpoint value Endpoint for S3 API. [$ENDPOINT] --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] --help, -h show help - --location-constraint value Location constraint - must match endpoint. [$LOCATION_CONSTRAINT] + --location-constraint value Location constraint - must be set to match the Region. [$LOCATION_CONSTRAINT] --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] --server-side-encryption value The server-side encryption algorithm used when storing this object in S3. [$SERVER_SIDE_ENCRYPTION] - --storage-class value The storage class to use when storing new objects in ChinaMobile. [$STORAGE_CLASS] + --storage-class value The storage class to use when storing new objects in S3. [$STORAGE_CLASS] Advanced @@ -629,9 +673,14 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. 
[$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --sse-customer-algorithm value If using SSE-C, the server-side encryption algorithm used when storing this object in S3. [$SSE_CUSTOMER_ALGORITHM] --sse-customer-key value To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY] --sse-customer-key-base64 value If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY_BASE64] @@ -640,11 +689,14 @@ OPTIONS: --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. 
(default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/update/s3/cloudflare.md b/docs/en/cli-reference/storage/update/s3/cloudflare.md index b55a44db1..df3e3388f 100644 --- a/docs/en/cli-reference/storage/update/s3/cloudflare.md +++ b/docs/en/cli-reference/storage/update/s3/cloudflare.md @@ -30,6 +30,8 @@ DESCRIPTION: --region Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. Examples: | auto | R2 buckets are automatically distributed across Cloudflare's data centers for low latency. @@ -39,29 +41,6 @@ DESCRIPTION: Required when using an S3 clone. - --bucket-acl - Canned ACL used when creating buckets. - - For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl - - Note that this ACL is applied when only when creating buckets. If it - isn't set then "acl" is used instead. - - If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: - header is added and the default (private) will be used. - - - Examples: - | private | Owner gets FULL_CONTROL. - | | No one else has access rights (default). - | public-read | Owner gets FULL_CONTROL. - | | The AllUsers group gets READ access. - | public-read-write | Owner gets FULL_CONTROL. - | | The AllUsers group gets READ and WRITE access. - | | Granting this on a bucket is generally not recommended. - | authenticated-read | Owner gets FULL_CONTROL. - | | The AuthenticatedUsers group gets READ access. - --upload-cutoff Cutoff for switching to chunked upload. @@ -153,6 +132,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. 
+ + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -192,6 +191,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -332,6 +334,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -344,7 +351,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -446,6 +453,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. 
+ + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -479,7 +510,6 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] @@ -502,18 +532,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. 
(default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/update/s3/cubbit.md b/docs/en/cli-reference/storage/update/s3/cubbit.md new file mode 100644 index 000000000..561966929 --- /dev/null +++ b/docs/en/cli-reference/storage/update/s3/cubbit.md @@ -0,0 +1,627 @@ +# Cubbit DS3 Object Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage update s3 cubbit - Cubbit DS3 Object Storage + +USAGE: + singularity storage update s3 cubbit [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). 
+ + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --region + Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. + + Examples: + | eu-west-1 | Europe West + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. + + Examples: + | s3.cubbit.eu | Cubbit DS3 Object Storage endpoint + + --acl + Canned ACL used when creating buckets and storing or copying objects. + + This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when server-side copying objects as S3 + doesn't copy the ACL from the source but rather writes a fresh one. + + If the acl is an empty string then no X-Amz-Acl: header is added and + the default (private) will be used. + + + --bucket-acl + Canned ACL used when creating buckets. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. 
+ | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. 
+ + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + + --upload-concurrency + Concurrency for multipart uploads and copies. 
+ + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --use-arn-region + If true, enables arn region support for the service. + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. 
+ + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If an source object of unknown length is uploaded then rclone **will** do a + HEAD request. 
+ + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. 
+ + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-options) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. 
+ + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. 
+ + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. 
You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --region value Region to connect to. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. 
(default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. 
(default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers. To remove, use empty string. + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth. To remove, use empty string. 
+ --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value). This will replace the existing header values. To remove a header, use --http-header "key="". To remove all headers, use --http-header "" + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth. To remove, use empty string. + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. 
(default: rclone default) + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/update/s3/digitalocean.md b/docs/en/cli-reference/storage/update/s3/digitalocean.md index 90dec5875..6e7fca023 100644 --- a/docs/en/cli-reference/storage/update/s3/digitalocean.md +++ b/docs/en/cli-reference/storage/update/s3/digitalocean.md @@ -33,12 +33,6 @@ DESCRIPTION: Leave blank if you are using an S3 clone and you don't have a region. - Examples: - | | Use this if unsure. - | | Will use v4 signatures and an empty region. - | other-v2-signature | Use this only if v4 signatures don't work. - | | E.g. pre Jewel/v10 CEPH. - --endpoint Endpoint for S3 API. 
@@ -47,10 +41,14 @@ DESCRIPTION: Examples: | syd1.digitaloceanspaces.com | DigitalOcean Spaces Sydney 1 | sfo3.digitaloceanspaces.com | DigitalOcean Spaces San Francisco 3 + | sfo2.digitaloceanspaces.com | DigitalOcean Spaces San Francisco 2 | fra1.digitaloceanspaces.com | DigitalOcean Spaces Frankfurt 1 | nyc3.digitaloceanspaces.com | DigitalOcean Spaces New York 3 | ams3.digitaloceanspaces.com | DigitalOcean Spaces Amsterdam 3 | sgp1.digitaloceanspaces.com | DigitalOcean Spaces Singapore 1 + | lon1.digitaloceanspaces.com | DigitalOcean Spaces London 1 + | tor1.digitaloceanspaces.com | DigitalOcean Spaces Toronto 1 + | blr1.digitaloceanspaces.com | DigitalOcean Spaces Bangalore 1 --location-constraint Location constraint - must be set to match the Region. @@ -185,6 +183,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -224,6 +242,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -364,6 +385,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. 
@@ -376,7 +402,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -478,6 +504,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -536,18 +586,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. 
[$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. 
(default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/update/s3/dreamhost.md b/docs/en/cli-reference/storage/update/s3/dreamhost.md index b433bf91f..a9235c36b 100644 --- a/docs/en/cli-reference/storage/update/s3/dreamhost.md +++ b/docs/en/cli-reference/storage/update/s3/dreamhost.md @@ -33,12 +33,6 @@ DESCRIPTION: Leave blank if you are using an S3 clone and you don't have a region. - Examples: - | | Use this if unsure. - | | Will use v4 signatures and an empty region. - | other-v2-signature | Use this only if v4 signatures don't work. - | | E.g. pre Jewel/v10 CEPH. - --endpoint Endpoint for S3 API. @@ -180,6 +174,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -219,6 +233,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -359,6 +376,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -371,7 +393,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. 
- See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -473,6 +495,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -531,18 +577,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. 
(default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. 
(default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/update/s3/exaba.md b/docs/en/cli-reference/storage/update/s3/exaba.md new file mode 100644 index 000000000..0af99894b --- /dev/null +++ b/docs/en/cli-reference/storage/update/s3/exaba.md @@ -0,0 +1,627 @@ +# Exaba Object Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage update s3 exaba - Exaba Object Storage + +USAGE: + singularity storage update s3 exaba [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --region + Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. + + --location-constraint + Location constraint - must be set to match the Region. + + Leave blank if not sure. Used when creating buckets only. + + --acl + Canned ACL used when creating buckets and storing or copying objects. + + This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when server-side copying objects as S3 + doesn't copy the ACL from the source but rather writes a fresh one. + + If the acl is an empty string then no X-Amz-Acl: header is added and + the default (private) will be used. + + + --bucket-acl + Canned ACL used when creating buckets. 
+ + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. + | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. 
+ + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. 
+ + + --session-token + An AWS session token. + + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). 
+ + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --use-arn-region + If true, enables arn region support for the service. + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. 
+ + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If an source object of unknown length is uploaded then rclone **will** do a + HEAD request. + + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. 
+ + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-options) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. 
These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. 
+ + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. 
+ + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --location-constraint value Location constraint - must be set to match the Region. [$LOCATION_CONSTRAINT] + --region value Region to connect to. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. 
(default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. 
[$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] + --v2-auth If true use v2 authentication. 
(default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers. To remove, use empty string. + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth. To remove, use empty string. + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value). This will replace the existing header values. To remove a header, use --http-header "key="". To remove all headers, use --http-header "" + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth. To remove, use empty string. + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. 
(default: rclone default) + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/update/s3/filelu.md b/docs/en/cli-reference/storage/update/s3/filelu.md new file mode 100644 index 000000000..59563dc65 --- /dev/null +++ b/docs/en/cli-reference/storage/update/s3/filelu.md @@ -0,0 +1,635 @@ +# FileLu S5 (S3-Compatible Object Storage) + +{% code fullWidth="true" %} +``` +NAME: + singularity storage update s3 filelu - FileLu S5 (S3-Compatible Object Storage) + +USAGE: + singularity storage update s3 filelu [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --region + Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. 
+ + Examples: + | global | Global + | us-east | North America (US-East) + | eu-central | Europe (EU-Central) + | ap-southeast | Asia Pacific (AP-Southeast) + | me-central | Middle East (ME-Central) + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. + + Examples: + | s5lu.com | Global FileLu S5 endpoint + | us.s5lu.com | North America (US-East) region endpoint + | eu.s5lu.com | Europe (EU-Central) region endpoint + | ap.s5lu.com | Asia Pacific (AP-Southeast) region endpoint + | me.s5lu.com | Middle East (ME-Central) region endpoint + + --acl + Canned ACL used when creating buckets and storing or copying objects. + + This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when server-side copying objects as S3 + doesn't copy the ACL from the source but rather writes a fresh one. + + If the acl is an empty string then no X-Amz-Acl: header is added and + the default (private) will be used. + + + --bucket-acl + Canned ACL used when creating buckets. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. + | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. 
+ + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. 
+ + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. 
+ + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --use-arn-region + If true, enables arn region support for the service. + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. 
+
+      When S3 originally launched it only provided the ListObjects call to
+      enumerate objects in a bucket.
+
+      However in May 2016 the ListObjectsV2 call was introduced. This is
+      much higher performance and should be used if at all possible.
+
+      If set to the default, 0, rclone will guess according to the provider
+      set which list objects method to call. If it guesses wrong, then it
+      may be set manually here.
+
+
+   --list-url-encode
+      Whether to url encode listings: true/false/unset
+
+      Some providers support URL encoding listings and where this is
+      available this is more reliable when using control characters in file
+      names. If this is set to unset (the default) then rclone will choose
+      according to the provider setting what to apply, but you can override
+      rclone's choice here.
+
+
+   --no-check-bucket
+      If set, don't attempt to check the bucket exists or create it.
+
+      This can be useful when trying to minimise the number of transactions
+      rclone does if you know the bucket exists already.
+
+      It can also be needed if the user you are using does not have bucket
+      creation permissions. Before v1.52.0 this would have passed silently
+      due to a bug.
+
+
+   --no-head
+      If set, don't HEAD uploaded objects to check integrity.
+
+      This can be useful when trying to minimise the number of transactions
+      rclone does.
+
+      Setting it means that if rclone receives a 200 OK message after
+      uploading an object with PUT then it will assume that it got uploaded
+      properly.
+
+      In particular it will assume:
+
+      - the metadata, including modtime, storage class and content type was as uploaded
+      - the size was as uploaded
+
+      It reads the following items from the response for a single part PUT:
+
+      - the MD5SUM
+      - The uploaded date
+
+      For multipart uploads these items aren't read.
+
+      If a source object of unknown length is uploaded then rclone **will** do a
+      HEAD request.
+ + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. 
+ + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-options) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. 
+ + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. 
+ + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. 
You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --region value Region to connect to. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. 
(default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. 
(default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers. To remove, use empty string. + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth. To remove, use empty string. 
+ --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value). This will replace the existing header values. To remove a header, use --http-header "key="". To remove all headers, use --http-header "" + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth. To remove, use empty string. + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. 
(default: rclone default) + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/update/s3/flashblade.md b/docs/en/cli-reference/storage/update/s3/flashblade.md new file mode 100644 index 000000000..8f8650b6f --- /dev/null +++ b/docs/en/cli-reference/storage/update/s3/flashblade.md @@ -0,0 +1,576 @@ +# Pure Storage FlashBlade Object Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage update s3 flashblade - Pure Storage FlashBlade Object Storage + +USAGE: + singularity storage update s3 flashblade [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. 
+ + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. 
+ + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. 
+ + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --use-arn-region + If true, enables arn region support for the service. + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. 
If it guesses wrong, then it
+      may be set manually here.
+
+
+   --list-url-encode
+      Whether to url encode listings: true/false/unset
+
+      Some providers support URL encoding listings and where this is
+      available this is more reliable when using control characters in file
+      names. If this is set to unset (the default) then rclone will choose
+      according to the provider setting what to apply, but you can override
+      rclone's choice here.
+
+
+   --no-check-bucket
+      If set, don't attempt to check the bucket exists or create it.
+
+      This can be useful when trying to minimise the number of transactions
+      rclone does if you know the bucket exists already.
+
+      It can also be needed if the user you are using does not have bucket
+      creation permissions. Before v1.52.0 this would have passed silently
+      due to a bug.
+
+
+   --no-head
+      If set, don't HEAD uploaded objects to check integrity.
+
+      This can be useful when trying to minimise the number of transactions
+      rclone does.
+
+      Setting it means that if rclone receives a 200 OK message after
+      uploading an object with PUT then it will assume that it got uploaded
+      properly.
+
+      In particular it will assume:
+
+      - the metadata, including modtime, storage class and content type was as uploaded
+      - the size was as uploaded
+
+      It reads the following items from the response for a single part PUT:
+
+      - the MD5SUM
+      - The uploaded date
+
+      For multipart uploads these items aren't read.
+
+      If a source object of unknown length is uploaded then rclone **will** do a
+      HEAD request.
+
+      Setting this flag increases the chance for undetected upload failures,
+      in particular an incorrect size, so it isn't recommended for normal
+      operation. In practice the chance of an undetected upload failure is
+      very small even with this flag.
+
+
+   --no-head-object
+      If set, do not do HEAD before GET when getting objects.
+
+   --encoding
+      The encoding for the backend.
+
+      See the [encoding section in the overview](/overview/#encoding) for more info.
+ + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. 
+ + + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-options) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). 
+ + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. 
Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). 
(default: false) [$ENV_AUTH] + --help, -h show help + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. 
(default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. 
(default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers. To remove, use empty string. + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth. To remove, use empty string. + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value). This will replace the existing header values. To remove a header, use --http-header "key="". To remove all headers, use --http-header "" + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth. To remove, use empty string. 
+ --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/update/s3/gcs.md b/docs/en/cli-reference/storage/update/s3/gcs.md index fba04eb77..63d8e4769 100644 --- a/docs/en/cli-reference/storage/update/s3/gcs.md +++ b/docs/en/cli-reference/storage/update/s3/gcs.md @@ -33,14 +33,10 @@ DESCRIPTION: Leave blank if you are using an S3 clone and you don't have a region. - Examples: - | | Use this if unsure. - | | Will use v4 signatures and an empty region. - | other-v2-signature | Use this only if v4 signatures don't work. - | | E.g. pre Jewel/v10 CEPH. - --endpoint - Endpoint for Google Cloud Storage. + Endpoint for S3 API. + + Required when using an S3 clone. Examples: | https://storage.googleapis.com | Google Cloud Storage endpoint @@ -178,6 +174,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. 
+ + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -217,6 +233,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -357,6 +376,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -369,7 +393,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -471,6 +495,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. 
+ + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -497,7 +545,7 @@ DESCRIPTION: OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] - --endpoint value Endpoint for Google Cloud Storage. [$ENDPOINT] + --endpoint value Endpoint for S3 API. [$ENDPOINT] --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] --help, -h show help --location-constraint value Location constraint - must be set to match the Region. [$LOCATION_CONSTRAINT] @@ -529,18 +577,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. 
(default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. 
(default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/update/s3/hetzner.md b/docs/en/cli-reference/storage/update/s3/hetzner.md new file mode 100644 index 000000000..33478845f --- /dev/null +++ b/docs/en/cli-reference/storage/update/s3/hetzner.md @@ -0,0 +1,637 @@ +# Hetzner Object Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage update s3 hetzner - Hetzner Object Storage + +USAGE: + singularity storage update s3 hetzner [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --region + Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. + + Examples: + | hel1 | Helsinki + | fsn1 | Falkenstein + | nbg1 | Nuremberg + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. + + Examples: + | hel1.your-objectstorage.com | Helsinki + | fsn1.your-objectstorage.com | Falkenstein + | nbg1.your-objectstorage.com | Nuremberg + + --location-constraint + Location constraint - must be set to match the Region. + + Leave blank if not sure. Used when creating buckets only. + + --acl + Canned ACL used when creating buckets and storing or copying objects. + + This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. 
+ + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when server-side copying objects as S3 + doesn't copy the ACL from the source but rather writes a fresh one. + + If the acl is an empty string then no X-Amz-Acl: header is added and + the default (private) will be used. + + + --bucket-acl + Canned ACL used when creating buckets. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. + | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. 
+ + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. 
If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. 
+ + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --use-arn-region + If true, enables arn region support for the service. + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. 
+
+      This can be useful when trying to minimise the number of transactions
+      rclone does if you know the bucket exists already.
+
+      It can also be needed if the user you are using does not have bucket
+      creation permissions. Before v1.52.0 this would have passed silently
+      due to a bug.
+
+
+   --no-head
+      If set, don't HEAD uploaded objects to check integrity.
+
+      This can be useful when trying to minimise the number of transactions
+      rclone does.
+
+      Setting it means that if rclone receives a 200 OK message after
+      uploading an object with PUT then it will assume that it got uploaded
+      properly.
+
+      In particular it will assume:
+
+      - the metadata, including modtime, storage class and content type was as uploaded
+      - the size was as uploaded
+
+      It reads the following items from the response for a single part PUT:
+
+      - the MD5SUM
+      - The uploaded date
+
+      For multipart uploads these items aren't read.
+
+      If a source object of unknown length is uploaded then rclone **will** do a
+      HEAD request.
+
+      Setting this flag increases the chance for undetected upload failures,
+      in particular an incorrect size, so it isn't recommended for normal
+      operation. In practice the chance of an undetected upload failure is
+      very small even with this flag.
+
+
+   --no-head-object
+      If set, do not do HEAD before GET when getting objects.
+
+   --encoding
+      The encoding for the backend.
+
+      See the [encoding section in the overview](/overview/#encoding) for more info.
+
+   --memory-pool-flush-time
+      How often internal memory buffer pools will be flushed. (no longer used)
+
+   --memory-pool-use-mmap
+      Whether to use mmap buffers in internal memory pool. (no longer used)
+
+   --disable-http2
+      Disable usage of http2 for S3 backends.
+
+      There is currently an unsolved issue with the s3 (specifically minio) backend
+      and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be
+      disabled here. When the issue is solved this flag will be removed.
+ + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". 
+ + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-options) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. 
+ + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. 
+ + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --location-constraint value Location constraint - must be set to match the Region. [$LOCATION_CONSTRAINT] + --region value Region to connect to. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. 
(default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. 
(default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. 
(default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers. To remove, use empty string. + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth. To remove, use empty string. + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value). This will replace the existing header values. To remove a header, use --http-header "key="". To remove all headers, use --http-header "" + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth. To remove, use empty string. 
+ --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/update/s3/huaweiobs.md b/docs/en/cli-reference/storage/update/s3/huaweiobs.md index cb602aa98..f50095c9f 100644 --- a/docs/en/cli-reference/storage/update/s3/huaweiobs.md +++ b/docs/en/cli-reference/storage/update/s3/huaweiobs.md @@ -29,8 +29,9 @@ DESCRIPTION: Leave blank for anonymous access or runtime credentials. --region - Region to connect to. - the location where your bucket will be created and your data stored. Need bo be same with your endpoint. + Region to connect to. + Leave blank if you are using an S3 clone and you don't have a region. Examples: | af-south-1 | AF-Johannesburg @@ -50,7 +51,9 @@ DESCRIPTION: | ru-northwest-2 | RU-Moscow2 --endpoint - Endpoint for OBS API. + Endpoint for S3 API. + + Required when using an S3 clone. Examples: | obs.af-south-1.myhuaweicloud.com | AF-Johannesburg @@ -197,6 +200,26 @@ DESCRIPTION: --session-token An AWS session token. 
+ --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -236,6 +259,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -376,6 +402,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -388,7 +419,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -490,6 +521,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. 
+ + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -516,10 +571,10 @@ DESCRIPTION: OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] - --endpoint value Endpoint for OBS API. [$ENDPOINT] + --endpoint value Endpoint for S3 API. [$ENDPOINT] --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] --help, -h show help - --region value Region to connect to. - the location where your bucket will be created and your data stored. Need bo be same with your endpoint. [$REGION] + --region value Region to connect to. [$REGION] --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] Advanced @@ -547,18 +602,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. 
[$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. 
(default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/update/s3/ibmcos.md b/docs/en/cli-reference/storage/update/s3/ibmcos.md index f7785224e..ccf546f83 100644 --- a/docs/en/cli-reference/storage/update/s3/ibmcos.md +++ b/docs/en/cli-reference/storage/update/s3/ibmcos.md @@ -33,16 +33,10 @@ DESCRIPTION: Leave blank if you are using an S3 clone and you don't have a region. - Examples: - | | Use this if unsure. - | | Will use v4 signatures and an empty region. - | other-v2-signature | Use this only if v4 signatures don't work. - | | E.g. pre Jewel/v10 CEPH. - --endpoint - Endpoint for IBM COS S3 API. + Endpoint for S3 API. - Specify if using an IBM COS On Premise. + Required when using an S3 clone. Examples: | s3.us.cloud-object-storage.appdomain.cloud | US Cross Region Endpoint @@ -71,11 +65,11 @@ DESCRIPTION: | s3.private.eu-de.cloud-object-storage.appdomain.cloud | EU Region DE Private Endpoint | s3.ap.cloud-object-storage.appdomain.cloud | APAC Cross Regional Endpoint | s3.tok.ap.cloud-object-storage.appdomain.cloud | APAC Cross Regional Tokyo Endpoint - | s3.hkg.ap.cloud-object-storage.appdomain.cloud | APAC Cross Regional HongKong Endpoint + | s3.hkg.ap.cloud-object-storage.appdomain.cloud | APAC Cross Regional Hong Kong Endpoint | s3.seo.ap.cloud-object-storage.appdomain.cloud | APAC Cross Regional Seoul Endpoint | s3.private.ap.cloud-object-storage.appdomain.cloud | APAC Cross Regional Private Endpoint | s3.private.tok.ap.cloud-object-storage.appdomain.cloud | APAC Cross Regional Tokyo Private Endpoint - | s3.private.hkg.ap.cloud-object-storage.appdomain.cloud | APAC Cross Regional HongKong Private Endpoint + | s3.private.hkg.ap.cloud-object-storage.appdomain.cloud | APAC Cross Regional Hong Kong Private Endpoint | s3.private.seo.ap.cloud-object-storage.appdomain.cloud | APAC Cross Regional Seoul Private Endpoint | s3.jp-tok.cloud-object-storage.appdomain.cloud | APAC Region Japan Endpoint | 
s3.private.jp-tok.cloud-object-storage.appdomain.cloud | APAC Region Japan Private Endpoint @@ -109,9 +103,9 @@ DESCRIPTION: | s3.private.sng01.cloud-object-storage.appdomain.cloud | Singapore Single Site Private Endpoint --location-constraint - Location constraint - must match endpoint when using IBM Cloud Public. + Location constraint - must be set to match the Region. - For on-prem COS, do not make a selection from this list, hit enter. + Leave blank if not sure. Used when creating buckets only. Examples: | us-standard | US Cross Region Standard @@ -290,6 +284,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -329,6 +343,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -469,6 +486,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -481,7 +503,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. 
+ See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -583,6 +605,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -602,19 +648,27 @@ DESCRIPTION: use `-vv` to see the debug level logs. + --ibm-api-key + IBM API Key to be used to obtain IAM token + + --ibm-resource-instance-id + IBM service instance id + --description Description of the remote. OPTIONS: - --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] - --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] - --endpoint value Endpoint for IBM COS S3 API. [$ENDPOINT] - --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] - --help, -h show help - --location-constraint value Location constraint - must match endpoint when using IBM Cloud Public. [$LOCATION_CONSTRAINT] - --region value Region to connect to. [$REGION] - --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] + --endpoint value Endpoint for S3 API. 
[$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --ibm-api-key value IBM API Key to be used to obtain IAM token [$IBM_API_KEY] + --ibm-resource-instance-id value IBM service instance id [$IBM_RESOURCE_INSTANCE_ID] + --location-constraint value Location constraint - must be set to match the Region. [$LOCATION_CONSTRAINT] + --region value Region to connect to. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] Advanced @@ -641,18 +695,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. 
(default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/update/s3/idrive.md b/docs/en/cli-reference/storage/update/s3/idrive.md index 98a08f536..c64730946 100644 --- a/docs/en/cli-reference/storage/update/s3/idrive.md +++ b/docs/en/cli-reference/storage/update/s3/idrive.md @@ -156,6 +156,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. 
+ + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -195,6 +215,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -335,6 +358,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -347,7 +375,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -449,6 +477,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -504,18 +556,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). 
(default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/update/s3/intercolo.md b/docs/en/cli-reference/storage/update/s3/intercolo.md new file mode 100644 index 000000000..639eaca5a --- /dev/null +++ b/docs/en/cli-reference/storage/update/s3/intercolo.md @@ -0,0 +1,627 @@ +# Intercolo Object Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage update s3 intercolo - Intercolo Object Storage + +USAGE: + singularity storage update s3 intercolo [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --region + Region to connect to. 
+ + Leave blank if you are using an S3 clone and you don't have a region. + + Examples: + | de-fra | Frankfurt, Germany + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. + + Examples: + | de-fra.i3storage.com | Frankfurt, Germany + + --acl + Canned ACL used when creating buckets and storing or copying objects. + + This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when server-side copying objects as S3 + doesn't copy the ACL from the source but rather writes a fresh one. + + If the acl is an empty string then no X-Amz-Acl: header is added and + the default (private) will be used. + + + --bucket-acl + Canned ACL used when creating buckets. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. + | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g.
from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. 
+ + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. 
+ + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --use-arn-region + If true, enables arn region support for the service. + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. 
If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If a source object of unknown length is uploaded then rclone **will** do a + HEAD request. + + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info.
+ + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. 
+ + + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-options) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). 
+ + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. 
Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] + --endpoint value Endpoint for S3 API. 
[$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --region value Region to connect to. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. 
(no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. 
(default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers. To remove, use empty string. + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth. To remove, use empty string. + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value). This will replace the existing header values. To remove a header, use --http-header "key="". 
To remove all headers, use --http-header "" + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth. To remove, use empty string. + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/update/s3/ionos.md b/docs/en/cli-reference/storage/update/s3/ionos.md index d67fe3634..212a27737 100644 --- a/docs/en/cli-reference/storage/update/s3/ionos.md +++ b/docs/en/cli-reference/storage/update/s3/ionos.md @@ -29,18 +29,18 @@ DESCRIPTION: Leave blank for anonymous access or runtime credentials. --region - Region where your bucket will be created and your data stored. + Region to connect to. + Leave blank if you are using an S3 clone and you don't have a region. Examples: - | de | Frankfurt, Germany | eu-central-2 | Berlin, Germany | eu-south-2 | Logrono, Spain --endpoint - Endpoint for IONOS S3 Object Storage. 
+ Endpoint for S3 API. - Specify the endpoint from the same region. + Required when using an S3 clone. Examples: | s3-eu-central-1.ionoscloud.com | Frankfurt, Germany @@ -175,6 +175,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -214,6 +234,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -354,6 +377,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -366,7 +394,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -468,6 +496,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -494,10 +546,10 @@ DESCRIPTION: OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] - --endpoint value Endpoint for IONOS S3 Object Storage. [$ENDPOINT] + --endpoint value Endpoint for S3 API. [$ENDPOINT] --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] --help, -h show help - --region value Region where your bucket will be created and your data stored. [$REGION] + --region value Region to connect to. [$REGION] --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] Advanced @@ -525,18 +577,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. 
[$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. 
(default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/update/s3/leviia.md b/docs/en/cli-reference/storage/update/s3/leviia.md index 801667d19..65d485d74 100644 --- a/docs/en/cli-reference/storage/update/s3/leviia.md +++ b/docs/en/cli-reference/storage/update/s3/leviia.md @@ -33,17 +33,15 @@ DESCRIPTION: Leave blank if you are using an S3 clone and you don't have a region. - Examples: - | | Use this if unsure. - | | Will use v4 signatures and an empty region. - | other-v2-signature | Use this only if v4 signatures don't work. - | | E.g. pre Jewel/v10 CEPH. - --endpoint Endpoint for S3 API. Required when using an S3 clone. + Examples: + | s3.leviia.com | The default endpoint + | | Leviia + --acl Canned ACL used when creating buckets and storing or copying objects. @@ -172,6 +170,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -211,6 +229,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -351,6 +372,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. 
+ + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -363,7 +389,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -465,6 +491,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -522,18 +572,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. 
[$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. 
(default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/update/s3/liara.md b/docs/en/cli-reference/storage/update/s3/liara.md index 06034a8fe..ae29a61c5 100644 --- a/docs/en/cli-reference/storage/update/s3/liara.md +++ b/docs/en/cli-reference/storage/update/s3/liara.md @@ -29,7 +29,9 @@ DESCRIPTION: Leave blank for anonymous access or runtime credentials. --endpoint - Endpoint for Liara Object Storage API. + Endpoint for S3 API. + + Required when using an S3 clone. Examples: | storage.iran.liara.space | The default endpoint @@ -73,10 +75,7 @@ DESCRIPTION: | | The AuthenticatedUsers group gets READ access. --storage-class - The storage class to use when storing new objects in Liara - - Examples: - | STANDARD | Standard storage class + The storage class to use when storing new objects in S3. --upload-cutoff Cutoff for switching to chunked upload. @@ -169,6 +168,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -208,6 +227,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -348,6 +370,11 @@ DESCRIPTION: circumstances or for testing. 
+ --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -360,7 +387,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -462,6 +489,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -488,11 +539,11 @@ DESCRIPTION: OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] - --endpoint value Endpoint for Liara Object Storage API. [$ENDPOINT] + --endpoint value Endpoint for S3 API. [$ENDPOINT] --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] --help, -h show help --secret-access-key value AWS Secret Access Key (password). 
[$SECRET_ACCESS_KEY] - --storage-class value The storage class to use when storing new objects in Liara [$STORAGE_CLASS] + --storage-class value The storage class to use when storing new objects in S3. [$STORAGE_CLASS] Advanced @@ -519,18 +570,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. 
(default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/update/s3/linode.md b/docs/en/cli-reference/storage/update/s3/linode.md index 645da4ced..316a65b68 100644 --- a/docs/en/cli-reference/storage/update/s3/linode.md +++ b/docs/en/cli-reference/storage/update/s3/linode.md @@ -29,19 +29,32 @@ DESCRIPTION: Leave blank for anonymous access or runtime credentials. --endpoint - Endpoint for Linode Object Storage API. + Endpoint for S3 API. + + Required when using an S3 clone. 
Examples: - | us-southeast-1.linodeobjects.com | Atlanta, GA (USA), us-southeast-1 - | us-ord-1.linodeobjects.com | Chicago, IL (USA), us-ord-1 - | eu-central-1.linodeobjects.com | Frankfurt (Germany), eu-central-1 - | it-mil-1.linodeobjects.com | Milan (Italy), it-mil-1 - | us-east-1.linodeobjects.com | Newark, NJ (USA), us-east-1 - | fr-par-1.linodeobjects.com | Paris (France), fr-par-1 - | us-sea-1.linodeobjects.com | Seattle, WA (USA), us-sea-1 - | ap-south-1.linodeobjects.com | Singapore ap-south-1 - | se-sto-1.linodeobjects.com | Stockholm (Sweden), se-sto-1 - | us-iad-1.linodeobjects.com | Washington, DC, (USA), us-iad-1 + | nl-ams-1.linodeobjects.com | Amsterdam, NL (nl-ams-1) + | us-southeast-1.linodeobjects.com | Atlanta, GA, US (us-southeast-1) + | in-maa-1.linodeobjects.com | Chennai, IN (in-maa-1) + | us-ord-1.linodeobjects.com | Chicago, IL, US (us-ord-1) + | eu-central-1.linodeobjects.com | Frankfurt, DE (eu-central-1) + | id-cgk-1.linodeobjects.com | Jakarta, ID (id-cgk-1) + | gb-lon-1.linodeobjects.com | London 2, UK (gb-lon-1) + | us-lax-1.linodeobjects.com | Los Angeles, CA, US (us-lax-1) + | es-mad-1.linodeobjects.com | Madrid, ES (es-mad-1) + | us-mia-1.linodeobjects.com | Miami, FL, US (us-mia-1) + | it-mil-1.linodeobjects.com | Milan, IT (it-mil-1) + | us-east-1.linodeobjects.com | Newark, NJ, US (us-east-1) + | jp-osa-1.linodeobjects.com | Osaka, JP (jp-osa-1) + | fr-par-1.linodeobjects.com | Paris, FR (fr-par-1) + | br-gru-1.linodeobjects.com | Sao Paulo, BR (br-gru-1) + | us-sea-1.linodeobjects.com | Seattle, WA, US (us-sea-1) + | ap-south-1.linodeobjects.com | Singapore, SG (ap-south-1) + | sg-sin-1.linodeobjects.com | Singapore 2, SG (sg-sin-1) + | se-sto-1.linodeobjects.com | Stockholm, SE (se-sto-1) + | jp-tyo-1.linodeobjects.com | Tokyo 3, JP (jp-tyo-1) + | us-iad-10.linodeobjects.com | Washington, DC, US (us-iad-10) --acl Canned ACL used when creating buckets and storing or copying objects. 
@@ -171,6 +184,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -210,6 +243,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -350,6 +386,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -362,7 +403,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -464,6 +505,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. 
+ + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -490,7 +555,7 @@ DESCRIPTION: OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] - --endpoint value Endpoint for Linode Object Storage API. [$ENDPOINT] + --endpoint value Endpoint for S3 API. [$ENDPOINT] --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] --help, -h show help --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] @@ -520,18 +585,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. 
(default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. 
(default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/update/s3/lyvecloud.md b/docs/en/cli-reference/storage/update/s3/lyvecloud.md index 54afbc6a3..5c578daa7 100644 --- a/docs/en/cli-reference/storage/update/s3/lyvecloud.md +++ b/docs/en/cli-reference/storage/update/s3/lyvecloud.md @@ -33,21 +33,14 @@ DESCRIPTION: Leave blank if you are using an S3 clone and you don't have a region. - Examples: - | | Use this if unsure. - | | Will use v4 signatures and an empty region. - | other-v2-signature | Use this only if v4 signatures don't work. - | | E.g. pre Jewel/v10 CEPH. - --endpoint Endpoint for S3 API. Required when using an S3 clone. Examples: - | s3.us-east-1.lyvecloud.seagate.com | Seagate Lyve Cloud US East 1 (Virginia) - | s3.us-west-1.lyvecloud.seagate.com | Seagate Lyve Cloud US West 1 (California) - | s3.ap-southeast-1.lyvecloud.seagate.com | Seagate Lyve Cloud AP Southeast 1 (Singapore) + | s3.us-west-1.{account_name}.lyve.seagate.com | US West 1 - California + | s3.eu-west-1.{account_name}.lyve.seagate.com | EU West 1 - Ireland --location-constraint Location constraint - must be set to match the Region. @@ -182,6 +175,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -221,6 +234,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. 
+ --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -361,6 +377,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -373,7 +394,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -475,6 +496,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -533,18 +578,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. 
[$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. 
(default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/update/s3/magalu.md b/docs/en/cli-reference/storage/update/s3/magalu.md index f324f6dec..83af09c49 100644 --- a/docs/en/cli-reference/storage/update/s3/magalu.md +++ b/docs/en/cli-reference/storage/update/s3/magalu.md @@ -34,8 +34,8 @@ DESCRIPTION: Required when using an S3 clone. Examples: - | br-se1.magaluobjects.com | Magalu BR Southeast 1 endpoint - | br-ne1.magaluobjects.com | Magalu BR Northeast 1 endpoint + | br-se1.magaluobjects.com | São Paulo, SP (BR), br-se1 + | br-ne1.magaluobjects.com | Fortaleza, CE (BR), br-ne1 --acl Canned ACL used when creating buckets and storing or copying objects. @@ -75,10 +75,7 @@ DESCRIPTION: | | The AuthenticatedUsers group gets READ access. --storage-class - The storage class to use when storing new objects in Magalu. - - Examples: - | STANDARD | Standard storage class + The storage class to use when storing new objects in S3. --upload-cutoff Cutoff for switching to chunked upload. @@ -171,6 +168,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. 
@@ -210,6 +227,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -350,6 +370,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -362,7 +387,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -464,6 +489,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -494,7 +543,7 @@ OPTIONS: --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). 
(default: false) [$ENV_AUTH] --help, -h show help --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] - --storage-class value The storage class to use when storing new objects in Magalu. [$STORAGE_CLASS] + --storage-class value The storage class to use when storing new objects in S3. [$STORAGE_CLASS] Advanced @@ -521,18 +570,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. 
(default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/update/s3/mega.md b/docs/en/cli-reference/storage/update/s3/mega.md new file mode 100644 index 000000000..277c65126 --- /dev/null +++ b/docs/en/cli-reference/storage/update/s3/mega.md @@ -0,0 +1,606 @@ +# MEGA S4 Object Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage update s3 mega - MEGA S4 Object Storage + +USAGE: + singularity storage update s3 mega [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. 
+ + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. + + Examples: + | s3.eu-central-1.s4.mega.io | Mega S4 eu-central-1 (Amsterdam) + | s3.eu-central-2.s4.mega.io | Mega S4 eu-central-2 (Bettembourg) + | s3.ca-central-1.s4.mega.io | Mega S4 ca-central-1 (Montreal) + | s3.ca-west-1.s4.mega.io | Mega S4 ca-west-1 (Vancouver) + + --bucket-acl + Canned ACL used when creating buckets. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. + | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. 
+ + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. 
+ + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. 
AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --use-arn-region + If true, enables arn region support for the service. + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. 
If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If a source object of unknown length is uploaded then rclone **will** do a + HEAD request. + + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. 
+ + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + + --versions + Include old versions in directory listings. 
+ + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-options) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. 
+ + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. 
(default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. 
(default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. 
(default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers. To remove, use empty string. + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth. To remove, use empty string. + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value). This will replace the existing header values. To remove a header, use --http-header "key="". To remove all headers, use --http-header "" + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth. To remove, use empty string. 
+ --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/update/s3/minio.md b/docs/en/cli-reference/storage/update/s3/minio.md index 8104f41e0..716d0d88a 100644 --- a/docs/en/cli-reference/storage/update/s3/minio.md +++ b/docs/en/cli-reference/storage/update/s3/minio.md @@ -33,12 +33,6 @@ DESCRIPTION: Leave blank if you are using an S3 clone and you don't have a region. - Examples: - | | Use this if unsure. - | | Will use v4 signatures and an empty region. - | other-v2-signature | Use this only if v4 signatures don't work. - | | E.g. pre Jewel/v10 CEPH. - --endpoint Endpoint for S3 API. @@ -89,10 +83,6 @@ DESCRIPTION: --server-side-encryption The server-side encryption algorithm used when storing this object in S3. - Examples: - | | None - | AES256 | AES256 - --sse-customer-algorithm If using SSE-C, the server-side encryption algorithm used when storing this object in S3. 
@@ -223,6 +213,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -262,6 +272,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -402,6 +415,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -414,7 +432,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -516,6 +534,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. 
+ + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -576,9 +618,14 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --sse-customer-algorithm value If using SSE-C, the server-side encryption algorithm used when storing this object in S3. [$SSE_CUSTOMER_ALGORITHM] --sse-customer-key value To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY] --sse-customer-key-base64 value If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY_BASE64] @@ -587,11 +634,14 @@ OPTIONS: --upload-cutoff value Cutoff for switching to chunked upload. 
(default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/update/s3/netease.md b/docs/en/cli-reference/storage/update/s3/netease.md index 73d5dfe92..e37a7e693 100644 --- a/docs/en/cli-reference/storage/update/s3/netease.md +++ b/docs/en/cli-reference/storage/update/s3/netease.md @@ -33,12 +33,6 @@ DESCRIPTION: Leave blank if you are using an S3 clone and you don't have a region. - Examples: - | | Use this if unsure. 
- | | Will use v4 signatures and an empty region. - | other-v2-signature | Use this only if v4 signatures don't work. - | | E.g. pre Jewel/v10 CEPH. - --endpoint Endpoint for S3 API. @@ -177,6 +171,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -216,6 +230,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -356,6 +373,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -368,7 +390,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -470,6 +492,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -528,18 +574,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. 
(default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/update/s3/other.md b/docs/en/cli-reference/storage/update/s3/other.md index 1e2769763..63396fa46 100644 --- a/docs/en/cli-reference/storage/update/s3/other.md +++ b/docs/en/cli-reference/storage/update/s3/other.md @@ -33,12 +33,6 @@ DESCRIPTION: Leave blank if you are using an S3 clone and you don't have a region. - Examples: - | | Use this if unsure. - | | Will use v4 signatures and an empty region. - | other-v2-signature | Use this only if v4 signatures don't work. - | | E.g. pre Jewel/v10 CEPH. - --endpoint Endpoint for S3 API. 
@@ -177,6 +171,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -216,6 +230,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -356,6 +373,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -368,7 +390,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -470,6 +492,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. 
+ + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -528,18 +574,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. 
(default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/update/s3/outscale.md b/docs/en/cli-reference/storage/update/s3/outscale.md new file mode 100644 index 000000000..a4e7ba6a7 --- /dev/null +++ b/docs/en/cli-reference/storage/update/s3/outscale.md @@ -0,0 +1,635 @@ +# OUTSCALE Object Storage (OOS) + +{% code fullWidth="true" %} +``` +NAME: + singularity storage update s3 outscale - OUTSCALE Object Storage (OOS) + +USAGE: + singularity storage update s3 outscale [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. 
+ + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --region + Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. + + Examples: + | eu-west-2 | Paris, France + | us-east-2 | New Jersey, USA + | us-west-1 | California, USA + | cloudgouv-eu-west-1 | SecNumCloud, Paris, France + | ap-northeast-1 | Tokyo, Japan + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. + + Examples: + | oos.eu-west-2.outscale.com | Outscale EU West 2 (Paris) + | oos.us-east-2.outscale.com | Outscale US east 2 (New Jersey) + | oos.us-west-1.outscale.com | Outscale US West 1 (California) + | oos.cloudgouv-eu-west-1.outscale.com | Outscale SecNumCloud (Paris) + | oos.ap-northeast-1.outscale.com | Outscale AP Northeast 1 (Japan) + + --acl + Canned ACL used when creating buckets and storing or copying objects. + + This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when server-side copying objects as S3 + doesn't copy the ACL from the source but rather writes a fresh one. + + If the acl is an empty string then no X-Amz-Acl: header is added and + the default (private) will be used. + + + --bucket-acl + Canned ACL used when creating buckets. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL.
+ | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. + | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. 
+ + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. 
+ + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --use-arn-region + If true, enables arn region support for the service. + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). 
+ In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. 
+ + If a source object of unknown length is uploaded then rclone **will** do a + HEAD request. + + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't.
+ + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-options) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. 
+ + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. 
+ + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. 
You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --region value Region to connect to. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. 
(default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. 
(default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers. To remove, use empty string. + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth. To remove, use empty string. 
+ --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value). This will replace the existing header values. To remove a header, use --http-header "key="". To remove all headers, use --http-header "" + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth. To remove, use empty string. + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. 
(default: rclone default) + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/update/s3/ovhcloud.md b/docs/en/cli-reference/storage/update/s3/ovhcloud.md new file mode 100644 index 000000000..08a4f2aca --- /dev/null +++ b/docs/en/cli-reference/storage/update/s3/ovhcloud.md @@ -0,0 +1,654 @@ +# OVHcloud Object Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage update s3 ovhcloud - OVHcloud Object Storage + +USAGE: + singularity storage update s3 ovhcloud [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --region + Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. 
+ + Examples: + | gra | Gravelines, France + | rbx | Roubaix, France + | sbg | Strasbourg, France + | eu-west-par | Paris, France (3AZ) + | uk | London, United Kingdom + | waw | Warsaw, Poland + | bhs | Beauharnois, Canada + | ca-east-tor | Toronto, Canada + | sgp | Singapore + | ap-southeast-syd | Sydney, Australia + | ap-south-mum | Mumbai, India + | us-east-va | Vint Hill, Virginia, USA + | us-west-or | Hillsboro, Oregon, USA + | rbx-archive | Roubaix, France (Cold Archive) + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. + + Examples: + | s3.gra.io.cloud.ovh.net | OVHcloud Gravelines, France + | s3.rbx.io.cloud.ovh.net | OVHcloud Roubaix, France + | s3.sbg.io.cloud.ovh.net | OVHcloud Strasbourg, France + | s3.eu-west-par.io.cloud.ovh.net | OVHcloud Paris, France (3AZ) + | s3.de.io.cloud.ovh.net | OVHcloud Frankfurt, Germany + | s3.uk.io.cloud.ovh.net | OVHcloud London, United Kingdom + | s3.waw.io.cloud.ovh.net | OVHcloud Warsaw, Poland + | s3.bhs.io.cloud.ovh.net | OVHcloud Beauharnois, Canada + | s3.ca-east-tor.io.cloud.ovh.net | OVHcloud Toronto, Canada + | s3.sgp.io.cloud.ovh.net | OVHcloud Singapore + | s3.ap-southeast-syd.io.cloud.ovh.net | OVHcloud Sydney, Australia + | s3.ap-south-mum.io.cloud.ovh.net | OVHcloud Mumbai, India + | s3.us-east-va.io.cloud.ovh.us | OVHcloud Vint Hill, Virginia, USA + | s3.us-west-or.io.cloud.ovh.us | OVHcloud Hillsboro, Oregon, USA + | s3.rbx-archive.io.cloud.ovh.net | OVHcloud Roubaix, France (Cold Archive) + + --acl + Canned ACL used when creating buckets and storing or copying objects. + + This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when server-side copying objects as S3 + doesn't copy the ACL from the source but rather writes a fresh one. 
+
+      If the acl is an empty string then no X-Amz-Acl: header is added and
+      the default (private) will be used.
+
+
+   --bucket-acl
+      Canned ACL used when creating buckets.
+
+      For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
+
+      Note that this ACL is applied only when creating buckets. If it
+      isn't set then "acl" is used instead.
+
+      If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl:
+      header is added and the default (private) will be used.
+
+
+      Examples:
+         | private            | Owner gets FULL_CONTROL.
+         |                    | No one else has access rights (default).
+         | public-read        | Owner gets FULL_CONTROL.
+         |                    | The AllUsers group gets READ access.
+         | public-read-write  | Owner gets FULL_CONTROL.
+         |                    | The AllUsers group gets READ and WRITE access.
+         |                    | Granting this on a bucket is generally not recommended.
+         | authenticated-read | Owner gets FULL_CONTROL.
+         |                    | The AuthenticatedUsers group gets READ access.
+
+   --upload-cutoff
+      Cutoff for switching to chunked upload.
+
+      Any files larger than this will be uploaded in chunks of chunk_size.
+      The minimum is 0 and the maximum is 5 GiB.
+
+   --chunk-size
+      Chunk size to use for uploading.
+
+      When uploading files larger than upload_cutoff or files with unknown
+      size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google
+      photos or google docs) they will be uploaded as multipart uploads
+      using this chunk size.
+
+      Note that "--s3-upload-concurrency" chunks of this size are buffered
+      in memory per transfer.
+
+      If you are transferring large files over high-speed links and you have
+      enough memory, then increasing this will speed up the transfers.
+
+      Rclone will automatically increase the chunk size when uploading a
+      large file of known size to stay below the 10,000 chunks limit.
+
+      Files of unknown size are uploaded with the configured
+      chunk_size. 
Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. 
+ + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. 
+ + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --use-arn-region + If true, enables arn region support for the service. + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. 
Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If an source object of unknown length is uploaded then rclone **will** do a + HEAD request. + + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. 
+ This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-options) for valid formats. 
+ + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. 
+ + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --region value Region to connect to. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. 
[$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. 
[$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. 
(default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers. To remove, use empty string. + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth. To remove, use empty string. + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value). This will replace the existing header values. To remove a header, use --http-header "key="". To remove all headers, use --http-header "" + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth. To remove, use empty string. 
+ --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/update/s3/petabox.md b/docs/en/cli-reference/storage/update/s3/petabox.md index 34574aa47..6be363aa0 100644 --- a/docs/en/cli-reference/storage/update/s3/petabox.md +++ b/docs/en/cli-reference/storage/update/s3/petabox.md @@ -29,20 +29,20 @@ DESCRIPTION: Leave blank for anonymous access or runtime credentials. --region - Region where your bucket will be created and your data stored. + Region to connect to. + Leave blank if you are using an S3 clone and you don't have a region. Examples: - | us-east-1 | US East (N. Virginia) | eu-central-1 | Europe (Frankfurt) | ap-southeast-1 | Asia Pacific (Singapore) | me-south-1 | Middle East (Bahrain) | sa-east-1 | South America (São Paulo) --endpoint - Endpoint for Petabox S3 Object Storage. + Endpoint for S3 API. - Specify the endpoint from the same region. + Required when using an S3 clone. Examples: | s3.petabox.io | US East (N. 
Virginia) @@ -180,6 +180,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -219,6 +239,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -359,6 +382,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -371,7 +399,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -473,6 +501,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. 
+ + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -499,10 +551,10 @@ DESCRIPTION: OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] - --endpoint value Endpoint for Petabox S3 Object Storage. [$ENDPOINT] + --endpoint value Endpoint for S3 API. [$ENDPOINT] --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] --help, -h show help - --region value Region where your bucket will be created and your data stored. [$REGION] + --region value Region to connect to. [$REGION] --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] Advanced @@ -530,18 +582,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. 
[$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. 
(default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/update/s3/qiniu.md b/docs/en/cli-reference/storage/update/s3/qiniu.md index 9e01d2f2a..44220f8c4 100644 --- a/docs/en/cli-reference/storage/update/s3/qiniu.md +++ b/docs/en/cli-reference/storage/update/s3/qiniu.md @@ -30,6 +30,8 @@ DESCRIPTION: --region Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. Examples: | cn-east-1 | The default endpoint - a good choice if you are unsure. @@ -49,7 +51,9 @@ DESCRIPTION: | | Needs location constraint ap-northeast-1. --endpoint - Endpoint for Qiniu Object Storage. + Endpoint for S3 API. + + Required when using an S3 clone. Examples: | s3-cn-east-1.qiniucs.com | East China Endpoint 1 @@ -63,7 +67,7 @@ DESCRIPTION: --location-constraint Location constraint - must be set to match the Region. - Used when creating buckets only. + Leave blank if not sure. Used when creating buckets only. Examples: | cn-east-1 | East China Region 1 @@ -112,12 +116,10 @@ DESCRIPTION: | | The AuthenticatedUsers group gets READ access. --storage-class - The storage class to use when storing new objects in Qiniu. + The storage class to use when storing new objects in S3. Examples: - | STANDARD | Standard storage class | LINE | Infrequent access storage mode - | GLACIER | Archive storage mode | DEEP_ARCHIVE | Deep archive storage mode --upload-cutoff @@ -211,6 +213,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. 
@@ -250,6 +272,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -390,6 +415,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -402,7 +432,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -504,6 +534,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -530,13 +584,13 @@ DESCRIPTION: OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] --acl value Canned ACL used when creating buckets and storing or copying objects. 
[$ACL] - --endpoint value Endpoint for Qiniu Object Storage. [$ENDPOINT] + --endpoint value Endpoint for S3 API. [$ENDPOINT] --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] --help, -h show help --location-constraint value Location constraint - must be set to match the Region. [$LOCATION_CONSTRAINT] --region value Region to connect to. [$REGION] --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] - --storage-class value The storage class to use when storing new objects in Qiniu. [$STORAGE_CLASS] + --storage-class value The storage class to use when storing new objects in S3. [$STORAGE_CLASS] Advanced @@ -563,18 +617,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. 
(default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. 
(default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/update/s3/rabata.md b/docs/en/cli-reference/storage/update/s3/rabata.md new file mode 100644 index 000000000..a58b58e65 --- /dev/null +++ b/docs/en/cli-reference/storage/update/s3/rabata.md @@ -0,0 +1,602 @@ +# Rabata Cloud Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage update s3 rabata - Rabata Cloud Storage + +USAGE: + singularity storage update s3 rabata [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --region + Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. + + Examples: + | eu-west-1 | EU (Ireland) + | eu-west-2 | EU (London) + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. + + Examples: + | s3.us-east-1.rabata.io | US East (N. Virginia) + | s3.eu-west-1.rabata.io | EU West (Ireland) + | s3.eu-west-2.rabata.io | EU West (London) + + --location-constraint + Location constraint - must be set to match the Region. + + Leave blank if not sure. Used when creating buckets only. + + Examples: + | us-east-1 | US East (N. Virginia) + | eu-west-1 | EU (Ireland) + | eu-west-2 | EU (London) + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. 
+ + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. 
+ + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. 
+ + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --use-arn-region + If true, enables arn region support for the service. + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. 
If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If an source object of unknown length is uploaded then rclone **will** do a + HEAD request. + + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. 
+ + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. 
+ + + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-options) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). 
+ + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. 
Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). 
(default: false) [$ENV_AUTH] + --help, -h show help + --location-constraint value Location constraint - must be set to match the Region. [$LOCATION_CONSTRAINT] + --region value Region to connect to. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. 
(no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. 
(default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers. To remove, use empty string. + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth. To remove, use empty string. + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value). This will replace the existing header values. To remove a header, use --http-header "key="". 
To remove all headers, use --http-header "" + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth. To remove, use empty string. + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/update/s3/rackcorp.md b/docs/en/cli-reference/storage/update/s3/rackcorp.md index 0729a33a3..27ee154c6 100644 --- a/docs/en/cli-reference/storage/update/s3/rackcorp.md +++ b/docs/en/cli-reference/storage/update/s3/rackcorp.md @@ -29,8 +29,9 @@ DESCRIPTION: Leave blank for anonymous access or runtime credentials. --region - region - the location where your bucket will be created and your data stored. + Region to connect to. + Leave blank if you are using an S3 clone and you don't have a region. 
Examples: | global | Global CDN (All locations) Region @@ -54,7 +55,9 @@ DESCRIPTION: | nz | Auckland (New Zealand) Region --endpoint - Endpoint for RackCorp Object Storage. + Endpoint for S3 API. + + Required when using an S3 clone. Examples: | s3.rackcorp.com | Global (AnyCast) Endpoint @@ -78,8 +81,9 @@ DESCRIPTION: | nz.s3.rackcorp.com | Auckland (New Zealand) Endpoint --location-constraint - Location constraint - the location where your bucket will be located and your data stored. + Location constraint - must be set to match the Region. + Leave blank if not sure. Used when creating buckets only. Examples: | global | Global CDN Region @@ -99,7 +103,7 @@ DESCRIPTION: | de | Frankfurt (Germany) Region | us | USA (AnyCast) Region | us-east-1 | New York (USA) Region - | us-west-1 | Freemont (USA) Region + | us-west-1 | Fremont (USA) Region | nz | Auckland (New Zealand) Region --acl @@ -230,6 +234,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -269,6 +293,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -409,6 +436,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. 
+ + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -421,7 +453,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -523,6 +555,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -549,11 +605,11 @@ DESCRIPTION: OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] - --endpoint value Endpoint for RackCorp Object Storage. [$ENDPOINT] + --endpoint value Endpoint for S3 API. [$ENDPOINT] --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] --help, -h show help - --location-constraint value Location constraint - the location where your bucket will be located and your data stored. 
[$LOCATION_CONSTRAINT] - --region value region - the location where your bucket will be created and your data stored. [$REGION] + --location-constraint value Location constraint - must be set to match the Region. [$LOCATION_CONSTRAINT] + --region value Region to connect to. [$REGION] --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] Advanced @@ -581,18 +637,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. 
(default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/update/s3/rclone.md b/docs/en/cli-reference/storage/update/s3/rclone.md index a95e51d9c..fa5ca7c2a 100644 --- a/docs/en/cli-reference/storage/update/s3/rclone.md +++ b/docs/en/cli-reference/storage/update/s3/rclone.md @@ -28,64 +28,11 @@ DESCRIPTION: Leave blank for anonymous access or runtime credentials. - --region - Region to connect to. - - Leave blank if you are using an S3 clone and you don't have a region. - - Examples: - | | Use this if unsure. - | | Will use v4 signatures and an empty region. - | other-v2-signature | Use this only if v4 signatures don't work. - | | E.g. pre Jewel/v10 CEPH. - --endpoint Endpoint for S3 API. Required when using an S3 clone. - --location-constraint - Location constraint - must be set to match the Region. - - Leave blank if not sure. 
Used when creating buckets only. - - --acl - Canned ACL used when creating buckets and storing or copying objects. - - This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. - - For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl - - Note that this ACL is applied when server-side copying objects as S3 - doesn't copy the ACL from the source but rather writes a fresh one. - - If the acl is an empty string then no X-Amz-Acl: header is added and - the default (private) will be used. - - - --bucket-acl - Canned ACL used when creating buckets. - - For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl - - Note that this ACL is applied when only when creating buckets. If it - isn't set then "acl" is used instead. - - If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: - header is added and the default (private) will be used. - - - Examples: - | private | Owner gets FULL_CONTROL. - | | No one else has access rights (default). - | public-read | Owner gets FULL_CONTROL. - | | The AllUsers group gets READ access. - | public-read-write | Owner gets FULL_CONTROL. - | | The AllUsers group gets READ and WRITE access. - | | Granting this on a bucket is generally not recommended. - | authenticated-read | Owner gets FULL_CONTROL. - | | The AuthenticatedUsers group gets READ access. - --upload-cutoff Cutoff for switching to chunked upload. @@ -177,6 +124,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. 
+ --upload-concurrency Concurrency for multipart uploads and copies. @@ -216,6 +183,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -356,6 +326,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -368,7 +343,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -470,6 +445,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -494,18 +493,14 @@ DESCRIPTION: OPTIONS: - --access-key-id value AWS Access Key ID. 
[$ACCESS_KEY_ID] - --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] - --endpoint value Endpoint for S3 API. [$ENDPOINT] - --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] - --help, -h show help - --location-constraint value Location constraint - must be set to match the Region. [$LOCATION_CONSTRAINT] - --region value Region to connect to. [$REGION] - --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] @@ -528,18 +523,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. 
[$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. 
(default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/update/s3/scaleway.md b/docs/en/cli-reference/storage/update/s3/scaleway.md index 630ac70a0..38afa2e10 100644 --- a/docs/en/cli-reference/storage/update/s3/scaleway.md +++ b/docs/en/cli-reference/storage/update/s3/scaleway.md @@ -30,6 +30,8 @@ DESCRIPTION: --region Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. Examples: | nl-ams | Amsterdam, The Netherlands @@ -37,7 +39,9 @@ DESCRIPTION: | pl-waw | Warsaw, Poland --endpoint - Endpoint for Scaleway Object Storage. + Endpoint for S3 API. + + Required when using an S3 clone. Examples: | s3.nl-ams.scw.cloud | Amsterdam Endpoint @@ -187,6 +191,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -226,6 +250,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -366,6 +393,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. 
+ + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -378,7 +410,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -480,6 +512,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -506,7 +562,7 @@ DESCRIPTION: OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] - --endpoint value Endpoint for Scaleway Object Storage. [$ENDPOINT] + --endpoint value Endpoint for S3 API. [$ENDPOINT] --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] --help, -h show help --region value Region to connect to. [$REGION] @@ -538,18 +594,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. 
(default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. 
(default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/update/s3/seaweedfs.md b/docs/en/cli-reference/storage/update/s3/seaweedfs.md index af0560b4d..e9e432d6e 100644 --- a/docs/en/cli-reference/storage/update/s3/seaweedfs.md +++ b/docs/en/cli-reference/storage/update/s3/seaweedfs.md @@ -33,12 +33,6 @@ DESCRIPTION: Leave blank if you are using an S3 clone and you don't have a region. - Examples: - | | Use this if unsure. - | | Will use v4 signatures and an empty region. - | other-v2-signature | Use this only if v4 signatures don't work. - | | E.g. pre Jewel/v10 CEPH. - --endpoint Endpoint for S3 API. @@ -180,6 +174,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. 
@@ -219,6 +233,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -359,6 +376,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -371,7 +393,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -473,6 +495,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -531,18 +577,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. 
(default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. 
(default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/update/s3/selectel.md b/docs/en/cli-reference/storage/update/s3/selectel.md new file mode 100644 index 000000000..fbedb7731 --- /dev/null +++ b/docs/en/cli-reference/storage/update/s3/selectel.md @@ -0,0 +1,593 @@ +# Selectel Object Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage update s3 selectel - Selectel Object Storage + +USAGE: + singularity storage update s3 selectel [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --region + Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. + + Examples: + | ru-3 | St. Petersburg + | kz-1 | Kazakhstan + | uz-2 | Uzbekistan + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. 
+ + Examples: + | s3.ru-1.storage.selcloud.ru | St. Petersburg + | s3.ru-3.storage.selcloud.ru | St. Petersburg + | s3.kz-1.storage.selcloud.ru | Kazakhstan + | s3.uz-2.storage.selcloud.ru | Uzbekistan + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. 
+ + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + + --upload-concurrency + Concurrency for multipart uploads and copies. 
+ + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --use-arn-region + If true, enables arn region support for the service. + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. 
+ + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If a source object of unknown length is uploaded then rclone **will** do a + HEAD request. 
+ + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. 
+ + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-options) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. 
+ + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. 
+ + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. 
You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --region value Region to connect to. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. 
(default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. 
(default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers. To remove, use empty string. + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth. To remove, use empty string. 
+ --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value). This will replace the existing header values. To remove a header, use --http-header "key="". To remove all headers, use --http-header "" + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth. To remove, use empty string. + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. 
(default: rclone default) + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/update/s3/servercore.md b/docs/en/cli-reference/storage/update/s3/servercore.md new file mode 100644 index 000000000..4a46d68a2 --- /dev/null +++ b/docs/en/cli-reference/storage/update/s3/servercore.md @@ -0,0 +1,615 @@ +# Servercore Object Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage update s3 servercore - Servercore Object Storage + +USAGE: + singularity storage update s3 servercore [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --region + Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. + + Examples: + | uz-2 | Tashkent, Uzbekistan + | kz-1 | Almaty, Kazakhstan + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. 
+ + Examples: + | s3.ru-1.storage.selcloud.ru | Saint Petersburg + | s3.uz-2.srvstorage.uz | Tashkent, Uzbekistan + | s3.kz-1.srvstorage.kz | Almaty, Kazakhstan + + --bucket-acl + Canned ACL used when creating buckets. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. + | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. 
Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. 
+ + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. 
+ + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --use-arn-region + If true, enables arn region support for the service. + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. 
Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If a source object of unknown length is uploaded then rclone **will** do a + HEAD request. + + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads.
+ This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-options) for valid formats. 
+ + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. 
+ + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --region value Region to connect to. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. 
[$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. 
[$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. 
(default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers. To remove, use empty string. + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth. To remove, use empty string. + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value). This will replace the existing header values. To remove a header, use --http-header "key="". To remove all headers, use --http-header "" + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth. To remove, use empty string. 
+ --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/update/s3/spectralogic.md b/docs/en/cli-reference/storage/update/s3/spectralogic.md new file mode 100644 index 000000000..d1dd32a74 --- /dev/null +++ b/docs/en/cli-reference/storage/update/s3/spectralogic.md @@ -0,0 +1,576 @@ +# Spectra Logic Black Pearl + +{% code fullWidth="true" %} +``` +NAME: + singularity storage update s3 spectralogic - Spectra Logic Black Pearl + +USAGE: + singularity storage update s3 spectralogic [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. 
+ + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. 
+ + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + + --upload-concurrency + Concurrency for multipart uploads and copies. 
+ + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --use-arn-region + If true, enables arn region support for the service. + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. 
+ + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If a source object of unknown length is uploaded then rclone **will** do a + HEAD request.
+ + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. 
+ + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-options) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. 
+ + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. 
+ + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. 
You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. 
(no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. 
+ (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers. To remove, use empty string. + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth. To remove, use empty string. + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value). This will replace the existing header values. To remove a header, use --client-header "key=".
To remove all headers, use --http-header "" + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth. To remove, use empty string. + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/update/s3/stackpath.md b/docs/en/cli-reference/storage/update/s3/stackpath.md index 91819dfb3..12ef47f51 100644 --- a/docs/en/cli-reference/storage/update/s3/stackpath.md +++ b/docs/en/cli-reference/storage/update/s3/stackpath.md @@ -33,14 +33,10 @@ DESCRIPTION: Leave blank if you are using an S3 clone and you don't have a region. - Examples: - | | Use this if unsure. - | | Will use v4 signatures and an empty region. - | other-v2-signature | Use this only if v4 signatures don't work. - | | E.g. pre Jewel/v10 CEPH. - --endpoint - Endpoint for StackPath Object Storage. + Endpoint for S3 API. + + Required when using an S3 clone. 
Examples: | s3.us-east-2.stackpathstorage.com | US East Endpoint @@ -175,6 +171,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -214,6 +230,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -354,6 +373,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -366,7 +390,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -468,6 +492,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. 
+ + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -494,7 +542,7 @@ DESCRIPTION: OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] - --endpoint value Endpoint for StackPath Object Storage. [$ENDPOINT] + --endpoint value Endpoint for S3 API. [$ENDPOINT] --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] --help, -h show help --region value Region to connect to. [$REGION] @@ -525,18 +573,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. 
(default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. 
(default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/update/s3/storj.md b/docs/en/cli-reference/storage/update/s3/storj.md index d367d255b..5c51ec6b1 100644 --- a/docs/en/cli-reference/storage/update/s3/storj.md +++ b/docs/en/cli-reference/storage/update/s3/storj.md @@ -29,33 +29,12 @@ DESCRIPTION: Leave blank for anonymous access or runtime credentials. --endpoint - Endpoint for Storj Gateway. - - Examples: - | gateway.storjshare.io | Global Hosted Gateway - - --bucket-acl - Canned ACL used when creating buckets. - - For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl - - Note that this ACL is applied when only when creating buckets. If it - isn't set then "acl" is used instead. - - If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: - header is added and the default (private) will be used. + Endpoint for S3 API. + Required when using an S3 clone. Examples: - | private | Owner gets FULL_CONTROL. - | | No one else has access rights (default). - | public-read | Owner gets FULL_CONTROL. - | | The AllUsers group gets READ access. - | public-read-write | Owner gets FULL_CONTROL. - | | The AllUsers group gets READ and WRITE access. - | | Granting this on a bucket is generally not recommended. - | authenticated-read | Owner gets FULL_CONTROL. - | | The AuthenticatedUsers group gets READ access. + | gateway.storjshare.io | Global Hosted Gateway --upload-cutoff Cutoff for switching to chunked upload. @@ -148,6 +127,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. 
+ + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -187,6 +186,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -327,6 +329,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -339,7 +346,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -441,6 +448,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -466,14 +497,13 @@ DESCRIPTION: OPTIONS: --access-key-id value AWS Access Key ID. 
[$ACCESS_KEY_ID] - --endpoint value Endpoint for Storj Gateway. [$ENDPOINT] + --endpoint value Endpoint for S3 API. [$ENDPOINT] --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] --help, -h show help --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] @@ -496,18 +526,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. 
(default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/update/s3/synology.md b/docs/en/cli-reference/storage/update/s3/synology.md index d8e3e6642..b6e14ebbf 100644 --- a/docs/en/cli-reference/storage/update/s3/synology.md +++ b/docs/en/cli-reference/storage/update/s3/synology.md @@ -29,8 +29,9 @@ DESCRIPTION: Leave blank for anonymous access or runtime credentials. --region - Region where your data stored. + Region to connect to. 
+ Leave blank if you are using an S3 clone and you don't have a region. Examples: | eu-001 | Europe Region 1 @@ -40,7 +41,9 @@ DESCRIPTION: | tw-001 | Asia (Taiwan) --endpoint - Endpoint for Synology C2 Object Storage API. + Endpoint for S3 API. + + Required when using an S3 clone. Examples: | eu-001.s3.synologyc2.net | EU Endpoint 1 @@ -54,29 +57,6 @@ DESCRIPTION: Leave blank if not sure. Used when creating buckets only. - --bucket-acl - Canned ACL used when creating buckets. - - For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl - - Note that this ACL is applied when only when creating buckets. If it - isn't set then "acl" is used instead. - - If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: - header is added and the default (private) will be used. - - - Examples: - | private | Owner gets FULL_CONTROL. - | | No one else has access rights (default). - | public-read | Owner gets FULL_CONTROL. - | | The AllUsers group gets READ access. - | public-read-write | Owner gets FULL_CONTROL. - | | The AllUsers group gets READ and WRITE access. - | | Granting this on a bucket is generally not recommended. - | authenticated-read | Owner gets FULL_CONTROL. - | | The AuthenticatedUsers group gets READ access. - --upload-cutoff Cutoff for switching to chunked upload. @@ -168,6 +148,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. 
@@ -207,6 +207,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -347,6 +350,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -359,7 +367,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -461,6 +469,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -486,16 +518,15 @@ DESCRIPTION: OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] - --endpoint value Endpoint for Synology C2 Object Storage API. [$ENDPOINT] + --endpoint value Endpoint for S3 API. 
[$ENDPOINT] --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] --help, -h show help --location-constraint value Location constraint - must be set to match the Region. [$LOCATION_CONSTRAINT] - --region value Region where your data stored. [$REGION] + --region value Region to connect to. [$REGION] --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] @@ -518,18 +549,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. 
(default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/update/s3/tencentcos.md b/docs/en/cli-reference/storage/update/s3/tencentcos.md index 23ea6e14e..a96b24351 100644 --- a/docs/en/cli-reference/storage/update/s3/tencentcos.md +++ b/docs/en/cli-reference/storage/update/s3/tencentcos.md @@ -29,14 +29,15 @@ DESCRIPTION: Leave blank for anonymous access or runtime credentials. --endpoint - Endpoint for Tencent COS API. + Endpoint for S3 API. 
+ + Required when using an S3 clone. Examples: | cos.ap-beijing.myqcloud.com | Beijing Region | cos.ap-nanjing.myqcloud.com | Nanjing Region | cos.ap-shanghai.myqcloud.com | Shanghai Region | cos.ap-guangzhou.myqcloud.com | Guangzhou Region - | cos.ap-nanjing.myqcloud.com | Nanjing Region | cos.ap-chengdu.myqcloud.com | Chengdu Region | cos.ap-chongqing.myqcloud.com | Chongqing Region | cos.ap-hongkong.myqcloud.com | Hong Kong (China) Region @@ -94,13 +95,10 @@ DESCRIPTION: | | The AuthenticatedUsers group gets READ access. --storage-class - The storage class to use when storing new objects in Tencent COS. + The storage class to use when storing new objects in S3. Examples: - | | Default - | STANDARD | Standard storage class - | ARCHIVE | Archive storage mode - | STANDARD_IA | Infrequent access storage mode + | ARCHIVE | Archive storage mode --upload-cutoff Cutoff for switching to chunked upload. @@ -193,6 +191,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -232,6 +250,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -372,6 +393,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. 
+ + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -384,7 +410,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. --version-deleted @@ -486,6 +512,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -512,11 +562,11 @@ DESCRIPTION: OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] - --endpoint value Endpoint for Tencent COS API. [$ENDPOINT] + --endpoint value Endpoint for S3 API. [$ENDPOINT] --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] --help, -h show help --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] - --storage-class value The storage class to use when storing new objects in Tencent COS. 
[$STORAGE_CLASS] + --storage-class value The storage class to use when storing new objects in S3. [$STORAGE_CLASS] Advanced @@ -543,18 +593,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). 
(default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/update/s3/wasabi.md b/docs/en/cli-reference/storage/update/s3/wasabi.md index e5d0d4c0d..8e655e3a3 100644 --- a/docs/en/cli-reference/storage/update/s3/wasabi.md +++ b/docs/en/cli-reference/storage/update/s3/wasabi.md @@ -33,12 +33,6 @@ DESCRIPTION: Leave blank if you are using an S3 clone and you don't have a region. - Examples: - | | Use this if unsure. - | | Will use v4 signatures and an empty region. - | other-v2-signature | Use this only if v4 signatures don't work. - | | E.g. pre Jewel/v10 CEPH. - --endpoint Endpoint for S3 API. 
@@ -54,6 +48,7 @@ DESCRIPTION: | s3.eu-central-2.wasabisys.com | Wasabi EU Central 2 (Frankfurt) | s3.eu-west-1.wasabisys.com | Wasabi EU West 1 (London) | s3.eu-west-2.wasabisys.com | Wasabi EU West 2 (Paris) + | s3.eu-south-1.wasabisys.com | Wasabi EU South 1 (Milan) | s3.ap-northeast-1.wasabisys.com | Wasabi AP Northeast 1 (Tokyo) endpoint | s3.ap-northeast-2.wasabisys.com | Wasabi AP Northeast 2 (Osaka) endpoint | s3.ap-southeast-1.wasabisys.com | Wasabi AP Southeast 1 (Singapore) @@ -192,6 +187,26 @@ DESCRIPTION: --session-token An AWS session token. + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + --upload-concurrency Concurrency for multipart uploads and copies. @@ -231,6 +246,9 @@ DESCRIPTION: See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-arn-region + If true, enables arn region support for the service. + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -371,6 +389,11 @@ DESCRIPTION: circumstances or for testing. + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + --versions Include old versions in directory listings. @@ -383,7 +406,7 @@ DESCRIPTION: Note that when using this no file write operations are permitted, so you can't upload files or delete them. - See [the time option docs](/docs/#time-option) for valid formats. + See [the time option docs](/docs/#time-options) for valid formats. 
--version-deleted @@ -485,6 +508,30 @@ DESCRIPTION: knows about - please make a bug report if not. + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + --sdk-log-mode Set to debug the SDK @@ -543,18 +590,26 @@ OPTIONS: --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] --profile value Profile to use in the shared credentials file. [$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] --session-token value An AWS session token. [$SESSION_TOKEN] --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] --upload-concurrency value Concurrency for multipart uploads and copies. 
(default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --version-deleted Show deleted file markers when using versions. 
(default: false) [$VERSION_DELETED] diff --git a/docs/en/cli-reference/storage/update/s3/zata.md b/docs/en/cli-reference/storage/update/s3/zata.md new file mode 100644 index 000000000..304706374 --- /dev/null +++ b/docs/en/cli-reference/storage/update/s3/zata.md @@ -0,0 +1,633 @@ +# Zata (S3 compatible Gateway) + +{% code fullWidth="true" %} +``` +NAME: + singularity storage update s3 zata - Zata (S3 compatible Gateway) + +USAGE: + singularity storage update s3 zata [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --region + Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. + + Examples: + | us-east-1 | Indore, Madhya Pradesh, India + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. + + Examples: + | idr01.zata.ai | South Asia Endpoint + + --location-constraint + Location constraint - must be set to match the Region. + + Leave blank if not sure. Used when creating buckets only. + + --acl + Canned ACL used when creating buckets and storing or copying objects. + + This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when server-side copying objects as S3 + doesn't copy the ACL from the source but rather writes a fresh one. 
+ + If the acl is an empty string then no X-Amz-Acl: header is added and + the default (private) will be used. + + + --bucket-acl + Canned ACL used when creating buckets. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. + | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. 
Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. 
+ + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --role-arn + ARN of the IAM role to assume. + + Leave blank if not using assume role. + + --role-session-name + Session name for assumed role. + + If empty, a session name will be generated automatically. + + --role-session-duration + Session duration for assumed role. + + If empty, the default session duration will be used. + + --role-external-id + External ID for assumed role. + + Leave blank if not using an external ID. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. 
+ + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --use-arn-region + If true, enables arn region support for the service. + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. 
Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If a source object of unknown length is uploaded then rclone **will** do a + HEAD request. + + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. 
+ This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --use-data-integrity-protections + If true use AWS S3 data integrity protections. + + See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-options) for valid formats. 
+ + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. 
+ + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-x-id + Set if rclone should add x-id URL parameters. + + You can change this if you want to disable the AWS SDK from + adding x-id URL parameters. + + This shouldn't be necessary in normal operation. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sign-accept-encoding + Set if rclone should include Accept-Encoding as part of the signature. + + You can change this if you want to stop rclone including + Accept-Encoding as part of the signature. + + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --location-constraint value Location constraint - must be set to match the Region. [$LOCATION_CONSTRAINT] + --region value Region to connect to. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. 
[$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. 
[$PROFILE] + --role-arn value ARN of the IAM role to assume. [$ROLE_ARN] + --role-external-id value External ID for assumed role. [$ROLE_EXTERNAL_ID] + --role-session-duration value Session duration for assumed role. [$ROLE_SESSION_DURATION] + --role-session-name value Session name for assumed role. [$ROLE_SESSION_NAME] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sign-accept-encoding value Set if rclone should include Accept-Encoding as part of the signature. (default: "unset") [$SIGN_ACCEPT_ENCODING] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-arn-region If true, enables arn region support for the service. (default: false) [$USE_ARN_REGION] + --use-data-integrity-protections value If true use AWS S3 data integrity protections. (default: "unset") [$USE_DATA_INTEGRITY_PROTECTIONS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. 
(default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --use-x-id value Set if rclone should add x-id URL parameters. (default: "unset") [$USE_X_ID] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers. To remove, use empty string. + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth. To remove, use empty string. + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value). This will replace the existing header values. To remove a header, use --http-header "key="". To remove all headers, use --http-header "" + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth. To remove, use empty string. 
+ --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/update/seafile.md b/docs/en/cli-reference/storage/update/seafile.md index 84ce28d8d..82d9593ec 100644 --- a/docs/en/cli-reference/storage/update/seafile.md +++ b/docs/en/cli-reference/storage/update/seafile.md @@ -63,7 +63,7 @@ OPTIONS: --create-library Should rclone create a library if it doesn't exist. (default: false) [$CREATE_LIBRARY] --description value Description of the remote. [$DESCRIPTION] - --encoding value The encoding for the backend. (default: "Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8") [$ENCODING] + --encoding value The encoding for the backend. 
(default: "Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8,Dot") [$ENCODING] Client Config diff --git a/docs/en/cli-reference/storage/update/sftp.md b/docs/en/cli-reference/storage/update/sftp.md index 46adbbb62..d0dd5dd77 100644 --- a/docs/en/cli-reference/storage/update/sftp.md +++ b/docs/en/cli-reference/storage/update/sftp.md @@ -49,6 +49,11 @@ DESCRIPTION: Only PEM encrypted key files (old OpenSSH format) are supported. Encrypted keys in the new OpenSSH format can't be used. + --pubkey + SSH public certificate for public certificate based authentication. + Set this if you have a signed certificate you want to use for authentication. + If specified will override pubkey_file. + --pubkey-file Optional path to public key file. @@ -147,13 +152,41 @@ DESCRIPTION: | powershell | PowerShell | cmd | Windows Command Prompt + --hashes + Comma separated list of supported checksum types. + --md5sum-command - The command used to read md5 hashes. + The command used to read MD5 hashes. Leave blank for autodetect. --sha1sum-command - The command used to read sha1 hashes. + The command used to read SHA-1 hashes. + + Leave blank for autodetect. + + --crc32sum-command + The command used to read CRC-32 hashes. + + Leave blank for autodetect. + + --sha256sum-command + The command used to read SHA-256 hashes. + + Leave blank for autodetect. + + --blake3sum-command + The command used to read BLAKE3 hashes. + + Leave blank for autodetect. + + --xxh3sum-command + The command used to read XXH3 hashes. + + Leave blank for autodetect. + + --xxh128sum-command + The command used to read XXH128 hashes. Leave blank for autodetect. @@ -375,6 +408,18 @@ DESCRIPTION: myUser:myPass@localhost:9005 + --http-proxy + URL for HTTP CONNECT proxy + + Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb. + + Supports the format http://user:pass@host:port, http://host:port, http://host. 
+ + Example: + + http://myUser:myPass@proxyhostname.example.com:8000 + + --copy-is-hardlink Set to enable server side copies using hardlinks. @@ -407,6 +452,7 @@ OPTIONS: --key-use-agent When set forces the usage of the ssh-agent. (default: false) [$KEY_USE_AGENT] --pass value SSH password, leave blank to use ssh-agent. [$PASS] --port value SSH port number. (default: 22) [$PORT] + --pubkey value SSH public certificate for public certificate based authentication. [$PUBKEY] --pubkey-file value Optional path to public key file. [$PUBKEY_FILE] --ssh value Path and arguments to external ssh binary. [$SSH] --use-insecure-cipher Enable the use of insecure ciphers and key exchange methods. (default: false) [$USE_INSECURE_CIPHER] @@ -415,30 +461,37 @@ OPTIONS: Advanced --ask-password Allow asking for SFTP password when needed. (default: false) [$ASK_PASSWORD] + --blake3sum-command value The command used to read BLAKE3 hashes. [$BLAKE3SUM_COMMAND] --chunk-size value Upload and download chunk size. (default: "32Ki") [$CHUNK_SIZE] --ciphers value Space separated list of ciphers to be used for session encryption, ordered by preference. [$CIPHERS] --concurrency value The maximum number of outstanding requests for one file (default: 64) [$CONCURRENCY] --connections value Maximum number of SFTP simultaneous connections, 0 for unlimited. (default: 0) [$CONNECTIONS] --copy-is-hardlink Set to enable server side copies using hardlinks. (default: false) [$COPY_IS_HARDLINK] + --crc32sum-command value The command used to read CRC-32 hashes. [$CRC32SUM_COMMAND] --description value Description of the remote. [$DESCRIPTION] --disable-concurrent-reads If set don't use concurrent reads. (default: false) [$DISABLE_CONCURRENT_READS] --disable-concurrent-writes If set don't use concurrent writes. (default: false) [$DISABLE_CONCURRENT_WRITES] + --hashes value Comma separated list of supported checksum types. 
[$HASHES] --host-key-algorithms value Space separated list of host key algorithms, ordered by preference. [$HOST_KEY_ALGORITHMS] + --http-proxy value URL for HTTP CONNECT proxy [$HTTP_PROXY] --idle-timeout value Max time before closing idle connections. (default: "1m0s") [$IDLE_TIMEOUT] --key-exchange value Space separated list of key exchange algorithms, ordered by preference. [$KEY_EXCHANGE] --known-hosts-file value Optional path to known_hosts file. [$KNOWN_HOSTS_FILE] --macs value Space separated list of MACs (message authentication code) algorithms, ordered by preference. [$MACS] - --md5sum-command value The command used to read md5 hashes. [$MD5SUM_COMMAND] + --md5sum-command value The command used to read MD5 hashes. [$MD5SUM_COMMAND] --path-override value Override path used by SSH shell commands. [$PATH_OVERRIDE] --server-command value Specifies the path or command to run a sftp server on the remote host. [$SERVER_COMMAND] --set-env value Environment variables to pass to sftp and commands [$SET_ENV] --set-modtime Set the modified time on the remote if set. (default: true) [$SET_MODTIME] - --sha1sum-command value The command used to read sha1 hashes. [$SHA1SUM_COMMAND] + --sha1sum-command value The command used to read SHA-1 hashes. [$SHA1SUM_COMMAND] + --sha256sum-command value The command used to read SHA-256 hashes. [$SHA256SUM_COMMAND] --shell-type value The type of SSH shell on remote server, if any. [$SHELL_TYPE] --skip-links Set to skip any symlinks and any other non regular files. (default: false) [$SKIP_LINKS] --socks-proxy value Socks 5 proxy host. [$SOCKS_PROXY] --subsystem value Specifies the SSH2 subsystem on the remote host. (default: "sftp") [$SUBSYSTEM] --use-fstat If set use fstat instead of stat. (default: false) [$USE_FSTAT] + --xxh128sum-command value The command used to read XXH128 hashes. [$XXH128SUM_COMMAND] + --xxh3sum-command value The command used to read XXH3 hashes. 
[$XXH3SUM_COMMAND] Client Config diff --git a/docs/en/cli-reference/storage/update/sharefile.md b/docs/en/cli-reference/storage/update/sharefile.md index 4b2738a18..707a67197 100644 --- a/docs/en/cli-reference/storage/update/sharefile.md +++ b/docs/en/cli-reference/storage/update/sharefile.md @@ -32,6 +32,13 @@ DESCRIPTION: Leave blank to use the provider defaults. + --client-credentials + Use client credentials OAuth flow. + + This will use the OAUTH2 client Credentials Flow as described in RFC 6749. + + Note that this option is NOT supported by all backends. + --upload-cutoff Cutoff for switching to multipart upload. @@ -84,6 +91,7 @@ OPTIONS: --auth-url value Auth server URL. [$AUTH_URL] --chunk-size value Upload chunk size. (default: "64Mi") [$CHUNK_SIZE] + --client-credentials Use client credentials OAuth flow. (default: false) [$CLIENT_CREDENTIALS] --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot") [$ENCODING] --endpoint value Endpoint for API calls. [$ENDPOINT] diff --git a/docs/en/cli-reference/storage/update/smb.md b/docs/en/cli-reference/storage/update/smb.md index 978c72dce..24a52d6e8 100644 --- a/docs/en/cli-reference/storage/update/smb.md +++ b/docs/en/cli-reference/storage/update/smb.md @@ -37,6 +37,15 @@ DESCRIPTION: Leave blank if not sure. + --use-kerberos + Use Kerberos authentication. + + If set, rclone will use Kerberos authentication instead of NTLM. This + requires a valid Kerberos configuration and credentials cache to be + available, either in the default locations or as specified by the + KRB5_CONFIG and KRB5CCNAME environment variables. + + --idle-timeout Max time before closing idle connections. @@ -54,6 +63,19 @@ DESCRIPTION: Always true on Windows shares. + --kerberos-ccache + Path to the Kerberos credential cache (krb5cc). 
+ + Overrides the default KRB5CCNAME environment variable and allows this + instance of the SMB backend to use a different Kerberos cache file. + This is useful when mounting multiple SMB with different credentials + or running in multi-user environments. + + Supported formats: + - FILE:/path/to/ccache – Use the specified file. + - DIR:/path/to/ccachedir – Use the primary file inside the specified directory. + - /path/to/ccache – Interpreted as a file path. + --encoding The encoding for the backend. @@ -70,15 +92,17 @@ OPTIONS: --pass value SMB password. [$PASS] --port value SMB port number. (default: 445) [$PORT] --spn value Service principal name. [$SPN] + --use-kerberos Use Kerberos authentication. (default: false) [$USE_KERBEROS] --user value SMB username. (default: "$USER") [$USER] Advanced - --case-insensitive Whether the server is configured to be case-insensitive. (default: true) [$CASE_INSENSITIVE] - --description value Description of the remote. [$DESCRIPTION] - --encoding value The encoding for the backend. (default: "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot") [$ENCODING] - --hide-special-share Hide special shares (e.g. print$) which users aren't supposed to access. (default: true) [$HIDE_SPECIAL_SHARE] - --idle-timeout value Max time before closing idle connections. (default: "1m0s") [$IDLE_TIMEOUT] + --case-insensitive Whether the server is configured to be case-insensitive. (default: true) [$CASE_INSENSITIVE] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot") [$ENCODING] + --hide-special-share Hide special shares (e.g. print$) which users aren't supposed to access. (default: true) [$HIDE_SPECIAL_SHARE] + --idle-timeout value Max time before closing idle connections. 
(default: "1m0s") [$IDLE_TIMEOUT] + --kerberos-ccache value Path to the Kerberos credential cache (krb5cc). [$KERBEROS_CCACHE] Client Config diff --git a/docs/en/cli-reference/storage/update/swift.md b/docs/en/cli-reference/storage/update/swift.md index 306108160..188450302 100644 --- a/docs/en/cli-reference/storage/update/swift.md +++ b/docs/en/cli-reference/storage/update/swift.md @@ -126,7 +126,7 @@ DESCRIPTION: --chunk-size Above this size files will be chunked. - Above this size files will be chunked into a a `_segments` container + Above this size files will be chunked into a `_segments` container or a `.file-segments` directory. (See the `use_segments_container` option for more info). Default for this is 5 GiB which is its maximum value, which means only files above this size will be chunked. diff --git a/docs/en/cli-reference/storage/update/uptobox.md b/docs/en/cli-reference/storage/update/uptobox.md deleted file mode 100644 index a1081f26c..000000000 --- a/docs/en/cli-reference/storage/update/uptobox.md +++ /dev/null @@ -1,64 +0,0 @@ -# Uptobox - -{% code fullWidth="true" %} -``` -NAME: - singularity storage update uptobox - Uptobox - -USAGE: - singularity storage update uptobox [command options] - -DESCRIPTION: - --access-token - Your access token. - - Get it from https://uptobox.com/my_account. - - --private - Set to make uploaded files private - - --encoding - The encoding for the backend. - - See the [encoding section in the overview](/overview/#encoding) for more info. - - --description - Description of the remote. - - -OPTIONS: - --access-token value Your access token. [$ACCESS_TOKEN] - --help, -h show help - - Advanced - - --description value Description of the remote. [$DESCRIPTION] - --encoding value The encoding for the backend. 
(default: "Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot") [$ENCODING] - --private Set to make uploaded files private (default: false) [$PRIVATE] - - Client Config - - --client-ca-cert value Path to CA certificate used to verify servers. To remove, use empty string. - --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth. To remove, use empty string. - --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) - --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) - --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value). This will replace the existing header values. To remove a header, use --http-header "key="". To remove all headers, use --http-header "" - --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) - --client-key value Path to Client SSL private key (PEM) for mutual TLS auth. To remove, use empty string. - --client-no-gzip Don't set Accept-Encoding: gzip (default: false) - --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) - --client-timeout value IO idle timeout (default: 5m0s) - --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. 
(default: rclone default) - - Retry Strategy - - --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) - --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) - --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) - --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) - --client-retry-max value Max number of retries for IO read errors (default: 10) - --client-skip-inaccessible Skip inaccessible files when opening (default: false) - -``` -{% endcode %} diff --git a/docs/en/cli-reference/storage/update/webdav.md b/docs/en/cli-reference/storage/update/webdav.md index 64a4df547..fdd8528e3 100644 --- a/docs/en/cli-reference/storage/update/webdav.md +++ b/docs/en/cli-reference/storage/update/webdav.md @@ -20,7 +20,8 @@ DESCRIPTION: Examples: | fastmail | Fastmail Files | nextcloud | Nextcloud - | owncloud | Owncloud + | owncloud | Owncloud 10 PHP based WebDAV server + | infinitescale | ownCloud Infinite Scale | sharepoint | Sharepoint Online, authenticated by Microsoft account | sharepoint-ntlm | Sharepoint with NTLM authentication, usually self-hosted or on-premises | rclone | rclone WebDAV server to serve a remote over HTTP via the WebDAV protocol @@ -81,6 +82,21 @@ DESCRIPTION: --unix-socket Path to a unix domain socket to dial to, instead of opening a TCP connection directly + --auth-redirect + Preserve authentication on redirect. + + If the server redirects rclone to a new domain when it is trying to + read a file then normally rclone will drop the Authorization: header + from the request. + + This is standard security practice to avoid sending your credentials + to an unknown webserver. + + However this is desirable in some circumstances. If you are getting + an error like "401 Unauthorized" when rclone is attempting to read + files from the webdav server then you can try this option. 
+ + --description Description of the remote. @@ -95,6 +111,7 @@ OPTIONS: Advanced + --auth-redirect Preserve authentication on redirect. (default: false) [$AUTH_REDIRECT] --bearer-token-command value Command to run to get a bearer token. [$BEARER_TOKEN_COMMAND] --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. [$ENCODING] diff --git a/docs/en/cli-reference/storage/update/yandex.md b/docs/en/cli-reference/storage/update/yandex.md index 2ed6937ca..c1e5ec5c3 100644 --- a/docs/en/cli-reference/storage/update/yandex.md +++ b/docs/en/cli-reference/storage/update/yandex.md @@ -32,6 +32,13 @@ DESCRIPTION: Leave blank to use the provider defaults. + --client-credentials + Use client credentials OAuth flow. + + This will use the OAUTH2 client Credentials Flow as described in RFC 6749. + + Note that this option is NOT supported by all backends. + --hard-delete Delete files permanently rather than putting them into the trash. @@ -54,13 +61,14 @@ OPTIONS: Advanced - --auth-url value Auth server URL. [$AUTH_URL] - --description value Description of the remote. [$DESCRIPTION] - --encoding value The encoding for the backend. (default: "Slash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] - --hard-delete Delete files permanently rather than putting them into the trash. (default: false) [$HARD_DELETE] - --spoof-ua Set the user agent to match an official version of the yandex disk client. May help with upload performance. (default: true) [$SPOOF_UA] - --token value OAuth Access Token as a JSON blob. [$TOKEN] - --token-url value Token server url. [$TOKEN_URL] + --auth-url value Auth server URL. [$AUTH_URL] + --client-credentials Use client credentials OAuth flow. (default: false) [$CLIENT_CREDENTIALS] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. 
(default: "Slash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] + --hard-delete Delete files permanently rather than putting them into the trash. (default: false) [$HARD_DELETE] + --spoof-ua Set the user agent to match an official version of the yandex disk client. May help with upload performance. (default: true) [$SPOOF_UA] + --token value OAuth Access Token as a JSON blob. [$TOKEN] + --token-url value Token server url. [$TOKEN_URL] Client Config diff --git a/docs/en/cli-reference/storage/update/zoho.md b/docs/en/cli-reference/storage/update/zoho.md index a584d6132..7dd9f61c1 100644 --- a/docs/en/cli-reference/storage/update/zoho.md +++ b/docs/en/cli-reference/storage/update/zoho.md @@ -32,6 +32,13 @@ DESCRIPTION: Leave blank to use the provider defaults. + --client-credentials + Use client credentials OAuth flow. + + This will use the OAUTH2 client Credentials Flow as described in RFC 6749. + + Note that this option is NOT supported by all backends. + --region Zoho region to connect to. @@ -47,6 +54,9 @@ DESCRIPTION: | com.cn | China | com.au | Australia + --upload-cutoff + Cutoff for switching to large file upload api (>= 10 MiB). + --encoding The encoding for the backend. @@ -64,11 +74,13 @@ OPTIONS: Advanced - --auth-url value Auth server URL. [$AUTH_URL] - --description value Description of the remote. [$DESCRIPTION] - --encoding value The encoding for the backend. (default: "Del,Ctl,InvalidUtf8") [$ENCODING] - --token value OAuth Access Token as a JSON blob. [$TOKEN] - --token-url value Token server url. [$TOKEN_URL] + --auth-url value Auth server URL. [$AUTH_URL] + --client-credentials Use client credentials OAuth flow. (default: false) [$CLIENT_CREDENTIALS] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Del,Ctl,InvalidUtf8") [$ENCODING] + --token value OAuth Access Token as a JSON blob. [$TOKEN] + --token-url value Token server url. 
[$TOKEN_URL] + --upload-cutoff value Cutoff for switching to large file upload api (>= 10 MiB). (default: "10Mi") [$UPLOAD_CUTOFF] Client Config diff --git a/docs/en/web-api-reference/storage.md b/docs/en/web-api-reference/storage.md index 40dddcd6f..5a638db92 100644 --- a/docs/en/web-api-reference/storage.md +++ b/docs/en/web-api-reference/storage.md @@ -152,6 +152,10 @@ [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} +{% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/bizflycloud" method="post" %} +[https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) +{% endswagger %} + {% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/ceph" method="post" %} [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} @@ -164,6 +168,10 @@ [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} +{% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/cubbit" method="post" %} 
+[https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) +{% endswagger %} + {% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/digitalocean" method="post" %} [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} @@ -172,10 +180,26 @@ [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} +{% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/exaba" method="post" %} +[https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) +{% endswagger %} + +{% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/filelu" method="post" %} +[https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) +{% endswagger %} + +{% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/flashblade" method="post" %} 
+[https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) +{% endswagger %} + {% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/gcs" method="post" %} [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} +{% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/hetzner" method="post" %} +[https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) +{% endswagger %} + {% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/huaweiobs" method="post" %} [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} @@ -188,6 +212,10 @@ [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} +{% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/intercolo" method="post" %} 
+[https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) +{% endswagger %} + {% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/ionos" method="post" %} [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} @@ -212,6 +240,10 @@ [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} +{% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/mega" method="post" %} +[https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) +{% endswagger %} + {% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/minio" method="post" %} [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} @@ -224,6 +256,14 @@ [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} +{% swagger 
src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/outscale" method="post" %} +[https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) +{% endswagger %} + +{% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/ovhcloud" method="post" %} +[https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) +{% endswagger %} + {% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/petabox" method="post" %} [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} @@ -232,6 +272,10 @@ [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} +{% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/rabata" method="post" %} +[https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) +{% endswagger %} + {% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/rackcorp" method="post" %} 
[https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} @@ -248,6 +292,18 @@ [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} +{% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/selectel" method="post" %} +[https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) +{% endswagger %} + +{% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/servercore" method="post" %} +[https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) +{% endswagger %} + +{% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/spectralogic" method="post" %} +[https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) +{% endswagger %} + {% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/stackpath" method="post" %} 
[https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} @@ -268,6 +324,10 @@ [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} +{% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/zata" method="post" %} +[https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) +{% endswagger %} + {% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/seafile" method="post" %} [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} @@ -308,10 +368,6 @@ [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} -{% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/uptobox" method="post" %} -[https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) -{% endswagger %} - {% swagger 
src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/webdav" method="post" %} [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} diff --git a/docs/jp/SUMMARY.md b/docs/jp/SUMMARY.md index 9698b6e32..174af4bac 100644 --- a/docs/jp/SUMMARY.md +++ b/docs/jp/SUMMARY.md @@ -137,7 +137,6 @@ * [新規](cli-reference/storage/create/storj/new.md) * [Sugarsync](cli-reference/storage/create/sugarsync.md) * [Swift](cli-reference/storage/create/swift.md) - * [Uptobox](cli-reference/storage/create/uptobox.md) * [Webdav](cli-reference/storage/create/webdav.md) * [Yandex](cli-reference/storage/create/yandex.md) * [Zoho](cli-reference/storage/create/zoho.md) @@ -217,7 +216,6 @@ * [新規](cli-reference/storage/update/storj/new.md) * [Sugarsync](cli-reference/storage/update/sugarsync.md) * [Swift](cli-reference/storage/update/swift.md) - * [Uptobox](cli-reference/storage/update/uptobox.md) * [Webdav](cli-reference/storage/update/webdav.md) * [Yandex](cli-reference/storage/update/yandex.md) * [Zoho](cli-reference/storage/update/zoho.md) diff --git a/docs/jp/cli-reference/run/content-provider.md b/docs/jp/cli-reference/run/content-provider.md index 6aca7894c..c737280c9 100644 --- a/docs/jp/cli-reference/run/content-provider.md +++ b/docs/jp/cli-reference/run/content-provider.md @@ -11,11 +11,9 @@ USAGE: OPTIONS: --help, -h ヘルプを表示する - Bitswapリトリーバル + HTTP IPFS Gateway - --enable-bitswap Bitswapリトリーバルを有効にする (デフォルト: false) - --libp2p-identity-key value libp2pピアのbase64エンコードされた秘密鍵 (デフォルト: AutoGenerated) - --libp2p-listen value [ --libp2p-listen value ] libp2p接続のリッスンアドレス + --enable-http-ipfs Enable trustless IPFS gateway on /ipfs/ (default: true) HTTPリトリーバル diff --git a/docs/jp/cli-reference/run/download-server.md 
b/docs/jp/cli-reference/run/download-server.md index 8cb93060e..827c02652 100644 --- a/docs/jp/cli-reference/run/download-server.md +++ b/docs/jp/cli-reference/run/download-server.md @@ -213,10 +213,6 @@ --sugarsync-private-access-key value Sugarsync Private Access Key. [$SUGARSYNC_PRIVATE_ACCESS_KEY] --sugarsync-refresh-token value Sugarsync refresh token. [$SUGARSYNC_REFRESH_TOKEN] - Uptobox - - --uptobox-access-token value アクセストークン。[$UPTOBOX_ACCESS_TOKEN] - WebDAV --webdav-bearer-token value ユーザー/パスワードの代わりにベアラトークン(Macaroonなど)を使用します。[$WEBDAV_BEARER_TOKEN] diff --git a/docs/jp/cli-reference/storage/create/README.md b/docs/jp/cli-reference/storage/create/README.md index 4ea798200..dd3dc1317 100644 --- a/docs/jp/cli-reference/storage/create/README.md +++ b/docs/jp/cli-reference/storage/create/README.md @@ -46,7 +46,6 @@ NAME: storj Storj分散クラウドストレージ sugarsync Sugarsync swift OpenStack Swift(Rackspace Cloud Files、Memset Memstore、OVH) - uptobox Uptobox webdav WebDAV yandex Yandex Disk zoho Zoho diff --git a/docs/jp/cli-reference/storage/create/uptobox.md b/docs/jp/cli-reference/storage/create/uptobox.md deleted file mode 100644 index fd36598c7..000000000 --- a/docs/jp/cli-reference/storage/create/uptobox.md +++ /dev/null @@ -1,37 +0,0 @@ -# Uptobox - -{% code fullWidth="true" %} -``` -名前: - singularity storage create uptobox - Uptobox - -使用法: - singularity storage create uptobox [コマンドオプション] [引数...] 
- -説明: - --access-token - アクセストークンです。 - - https://uptobox.com/my_account から取得してください。 - - --encoding - バックエンドのエンコーディングです。 - - 詳細は[概要のエンコーディングセクション](/overview/#encoding)を参照してください。 - - -オプション: - --access-token value アクセストークンです。 [$ACCESS_TOKEN] - --help, -h ヘルプを表示します - - 高度な設定 - - --encoding value バックエンドのエンコーディングです。 (デフォルト: "Slash, LtGt, DoubleQuote, BackQuote, Del, Ctl, LeftSpace, InvalidUtf8, Dot") [$ENCODING] - - 共通設定 - - --name value ストレージの名前です (デフォルト: 自動生成) - --path value ストレージのパスです - -``` -{% endcode %} \ No newline at end of file diff --git a/docs/jp/cli-reference/storage/update/README.md b/docs/jp/cli-reference/storage/update/README.md index 438288eba..dc5b1c938 100644 --- a/docs/jp/cli-reference/storage/update/README.md +++ b/docs/jp/cli-reference/storage/update/README.md @@ -46,7 +46,6 @@ NAME: storj Storj分散型クラウドストレージ sugarsync Sugarsync swift OpenStack Swift (Rackspace Cloud Files、Memset Memstore、OVH) - uptobox Uptobox webdav WebDAV yandex Yandex Disk zoho Zoho diff --git a/docs/jp/cli-reference/storage/update/uptobox.md b/docs/jp/cli-reference/storage/update/uptobox.md deleted file mode 100644 index 7a2aebb7a..000000000 --- a/docs/jp/cli-reference/storage/update/uptobox.md +++ /dev/null @@ -1,32 +0,0 @@ -# Uptobox - -{% code fullWidth="true" %} -``` -名前: - singularity storage update uptobox - Uptobox - -使用法: - singularity storage update uptobox [command options] - -説明: - --access-token - アクセストークンです。 - - [https://uptobox.com/my_account](https://uptobox.com/my_account)から取得してください。 - - --encoding - バックエンドのエンコーディングです。 - - 詳細については[概要のエンコーディングセクション](/overview/#encoding)を参照してください。 - - -オプション: - --access-token value アクセストークンです。 [$ACCESS_TOKEN] - --help, -h ヘルプを表示する - - 上級オプション - - --encoding value バックエンドのエンコーディングです。 (デフォルト: "Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot") [$ENCODING] - -``` -{% endcode %} \ No newline at end of file diff --git a/docs/kr/SUMMARY.md b/docs/kr/SUMMARY.md index 662a1c9d1..2465b6576 100644 --- a/docs/kr/SUMMARY.md 
+++ b/docs/kr/SUMMARY.md @@ -137,7 +137,6 @@ * [새로 만들기](cli-reference/storage/create/storj/new.md) * [Sugarsync](cli-reference/storage/create/sugarsync.md) * [Swift](cli-reference/storage/create/swift.md) - * [Uptobox](cli-reference/storage/create/uptobox.md) * [Webdav](cli-reference/storage/create/webdav.md) * [Yandex](cli-reference/storage/create/yandex.md) * [Zoho](cli-reference/storage/create/zoho.md) @@ -217,7 +216,6 @@ * [새로 만들기](cli-reference/storage/update/storj/new.md) * [Sugarsync](cli-reference/storage/update/sugarsync.md) * [Swift](cli-reference/storage/update/swift.md) - * [Uptobox](cli-reference/storage/update/uptobox.md) * [Webdav](cli-reference/storage/update/webdav.md) * [Yandex](cli-reference/storage/update/yandex.md) * [Zoho](cli-reference/storage/update/zoho.md) diff --git a/docs/kr/cli-reference/run/content-provider.md b/docs/kr/cli-reference/run/content-provider.md index 48006542b..79c7a4204 100644 --- a/docs/kr/cli-reference/run/content-provider.md +++ b/docs/kr/cli-reference/run/content-provider.md @@ -11,11 +11,9 @@ 옵션: --help, -h 도움말 표시 - Bitswap 검색 + HTTP IPFS Gateway - --enable-bitswap Bitswap 검색 활성화 (기본값: false) - --libp2p-identity-key value libp2p 피어의 base64로 인코딩된 개인 키 (기본값: 자동생성) - --libp2p-listen value [ --libp2p-listen value ] libp2p 연결을 위해 듣기 대기할 주소 + --enable-http-ipfs Enable trustless IPFS gateway on /ipfs/ (default: true) HTTP 검색 diff --git a/docs/kr/cli-reference/run/download-server.md b/docs/kr/cli-reference/run/download-server.md index 9020b7a60..ba9c7cf80 100644 --- a/docs/kr/cli-reference/run/download-server.md +++ b/docs/kr/cli-reference/run/download-server.md @@ -213,10 +213,6 @@ NAME: --sugarsync-private-access-key value Sugarsync 개인 액세스 키. [$SUGARSYNC_PRIVATE_ACCESS_KEY] --sugarsync-refresh-token value Sugarsync 갱신 토큰. [$SUGARSYNC_REFRESH_TOKEN] - Uptobox - - --uptobox-access-token value 액세스 토큰. [$UPTOBOX_ACCESS_TOKEN] - WebDAV --webdav-bearer-token value 사용자/암호 대신 Bearer 토큰을 지정하십시오 (예: Macaroon). 
[$WEBDAV_BEARER_TOKEN] diff --git a/docs/kr/cli-reference/storage/create/README.md b/docs/kr/cli-reference/storage/create/README.md index 667f05b3a..6e6fb58e8 100644 --- a/docs/kr/cli-reference/storage/create/README.md +++ b/docs/kr/cli-reference/storage/create/README.md @@ -46,7 +46,6 @@ COMMANDS: storj Storj 분산 클라우드 스토리지 sugarsync Sugarsync swift OpenStack Swift (Rackspace Cloud Files, Memset Memstore, OVH) - uptobox Uptobox webdav WebDAV yandex Yandex Disk zoho Zoho diff --git a/docs/kr/cli-reference/storage/create/uptobox.md b/docs/kr/cli-reference/storage/create/uptobox.md deleted file mode 100644 index bf19794ad..000000000 --- a/docs/kr/cli-reference/storage/create/uptobox.md +++ /dev/null @@ -1,37 +0,0 @@ -# Uptobox - -{% code fullWidth="true" %} -``` -이름: - singularity storage create uptobox - Uptobox - -사용법: - singularity storage create uptobox [command options] [arguments...] - -설명: - --access-token - 액세스 토큰입니다. - - https://uptobox.com/my_account에서 얻을 수 있습니다. - - --encoding - 백엔드의 인코딩입니다. - - [개요의 인코딩 섹션](/overview/#encoding)에서 자세한 정보를 확인하세요. - - -옵션: - --access-token value 액세스 토큰입니다. [$ACCESS_TOKEN] - --help, -h 도움말 표시 - - 고급 - - --encoding value 백엔드의 인코딩입니다. (기본값: "Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot") [$ENCODING] - - 일반 - - --name value 스토리지의 이름입니다. (기본값: 자동 생성) - --path value 스토리지의 경로입니다. 
- -``` -{% endcode %} \ No newline at end of file diff --git a/docs/kr/cli-reference/storage/update/README.md b/docs/kr/cli-reference/storage/update/README.md index 208ff4c3f..0c40c751b 100644 --- a/docs/kr/cli-reference/storage/update/README.md +++ b/docs/kr/cli-reference/storage/update/README.md @@ -46,7 +46,6 @@ storj Storj 분산 클라우드 저장소 sugarsync Sugarsync swift OpenStack Swift (Rackspace Cloud Files, Memset Memstore, OVH) - uptobox Uptobox webdav WebDAV yandex Yandex Disk zoho Zoho diff --git a/docs/kr/cli-reference/storage/update/uptobox.md b/docs/kr/cli-reference/storage/update/uptobox.md deleted file mode 100644 index fd2052415..000000000 --- a/docs/kr/cli-reference/storage/update/uptobox.md +++ /dev/null @@ -1,31 +0,0 @@ -# Uptobox - -{% code fullWidth="true" %} -``` -이름: - singularity storage update uptobox - Uptobox - -사용법: - singularity storage update uptobox [명령 옵션] <이름|ID> - -설명: - --access-token - 액세스 토큰입니다. - - https://uptobox.com/my_account에서 얻을 수 있습니다. - - --encoding - 백엔드의 인코딩입니다. - - 자세한 내용은 [개요의 인코딩 섹션](/overview/#encoding)을 참조하십시오. - - -옵션: - --access-token value 액세스 토큰입니다. [$ACCESS_TOKEN] - --help, -h 도움말 표시 - - 고급 - - --encoding value 백엔드의 인코딩입니다. 
(기본값: "슬래시,크기가 작은부등호,큰따옴표,역따옴표,삭제,제어,왼쪽 공백,유효하지 않은 UTF-8,닷") [$ENCODING] -``` -{% endcode %} \ No newline at end of file diff --git a/docs/swagger/docs.go b/docs/swagger/docs.go index b08876be4..9ed40a803 100644 --- a/docs/swagger/docs.go +++ b/docs/swagger/docs.go @@ -3665,7 +3665,7 @@ const docTemplate = `{ } } }, - "/storage/s3/ceph": { + "/storage/s3/bizflycloud": { "post": { "consumes": [ "application/json" @@ -3676,8 +3676,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with Ceph - Ceph Object Storage", - "operationId": "CreateS3CephStorage", + "summary": "Create S3 storage with BizflyCloud - Bizfly Cloud Simple Storage", + "operationId": "CreateS3BizflyCloudStorage", "parameters": [ { "description": "Request body", @@ -3685,7 +3685,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3CephStorageRequest" + "$ref": "#/definitions/storage.createS3BizflyCloudStorageRequest" } } ], @@ -3711,7 +3711,7 @@ const docTemplate = `{ } } }, - "/storage/s3/chinamobile": { + "/storage/s3/ceph": { "post": { "consumes": [ "application/json" @@ -3722,8 +3722,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with ChinaMobile - China Mobile Ecloud Elastic Object Storage (EOS)", - "operationId": "CreateS3ChinaMobileStorage", + "summary": "Create S3 storage with Ceph - Ceph Object Storage", + "operationId": "CreateS3CephStorage", "parameters": [ { "description": "Request body", @@ -3731,7 +3731,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3ChinaMobileStorageRequest" + "$ref": "#/definitions/storage.createS3CephStorageRequest" } } ], @@ -3757,7 +3757,7 @@ const docTemplate = `{ } } }, - "/storage/s3/cloudflare": { + "/storage/s3/chinamobile": { "post": { "consumes": [ "application/json" @@ -3768,8 +3768,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with 
Cloudflare - Cloudflare R2 Storage", - "operationId": "CreateS3CloudflareStorage", + "summary": "Create S3 storage with ChinaMobile - China Mobile Ecloud Elastic Object Storage (EOS)", + "operationId": "CreateS3ChinaMobileStorage", "parameters": [ { "description": "Request body", @@ -3777,7 +3777,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3CloudflareStorageRequest" + "$ref": "#/definitions/storage.createS3ChinaMobileStorageRequest" } } ], @@ -3803,7 +3803,7 @@ const docTemplate = `{ } } }, - "/storage/s3/digitalocean": { + "/storage/s3/cloudflare": { "post": { "consumes": [ "application/json" @@ -3814,8 +3814,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with DigitalOcean - DigitalOcean Spaces", - "operationId": "CreateS3DigitalOceanStorage", + "summary": "Create S3 storage with Cloudflare - Cloudflare R2 Storage", + "operationId": "CreateS3CloudflareStorage", "parameters": [ { "description": "Request body", @@ -3823,7 +3823,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3DigitalOceanStorageRequest" + "$ref": "#/definitions/storage.createS3CloudflareStorageRequest" } } ], @@ -3849,7 +3849,7 @@ const docTemplate = `{ } } }, - "/storage/s3/dreamhost": { + "/storage/s3/cubbit": { "post": { "consumes": [ "application/json" @@ -3860,8 +3860,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with Dreamhost - Dreamhost DreamObjects", - "operationId": "CreateS3DreamhostStorage", + "summary": "Create S3 storage with Cubbit - Cubbit DS3 Object Storage", + "operationId": "CreateS3CubbitStorage", "parameters": [ { "description": "Request body", @@ -3869,7 +3869,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3DreamhostStorageRequest" + "$ref": "#/definitions/storage.createS3CubbitStorageRequest" } } ], @@ -3895,7 
+3895,7 @@ const docTemplate = `{ } } }, - "/storage/s3/gcs": { + "/storage/s3/digitalocean": { "post": { "consumes": [ "application/json" @@ -3906,8 +3906,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with GCS - Google Cloud Storage", - "operationId": "CreateS3GCSStorage", + "summary": "Create S3 storage with DigitalOcean - DigitalOcean Spaces", + "operationId": "CreateS3DigitalOceanStorage", "parameters": [ { "description": "Request body", @@ -3915,7 +3915,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3GCSStorageRequest" + "$ref": "#/definitions/storage.createS3DigitalOceanStorageRequest" } } ], @@ -3941,7 +3941,7 @@ const docTemplate = `{ } } }, - "/storage/s3/huaweiobs": { + "/storage/s3/dreamhost": { "post": { "consumes": [ "application/json" @@ -3952,8 +3952,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with HuaweiOBS - Huawei Object Storage Service", - "operationId": "CreateS3HuaweiOBSStorage", + "summary": "Create S3 storage with Dreamhost - Dreamhost DreamObjects", + "operationId": "CreateS3DreamhostStorage", "parameters": [ { "description": "Request body", @@ -3961,7 +3961,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3HuaweiOBSStorageRequest" + "$ref": "#/definitions/storage.createS3DreamhostStorageRequest" } } ], @@ -3987,7 +3987,7 @@ const docTemplate = `{ } } }, - "/storage/s3/ibmcos": { + "/storage/s3/exaba": { "post": { "consumes": [ "application/json" @@ -3998,8 +3998,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with IBMCOS - IBM COS S3", - "operationId": "CreateS3IBMCOSStorage", + "summary": "Create S3 storage with Exaba - Exaba Object Storage", + "operationId": "CreateS3ExabaStorage", "parameters": [ { "description": "Request body", @@ -4007,7 +4007,7 @@ const docTemplate = `{ "in": "body", "required": 
true, "schema": { - "$ref": "#/definitions/storage.createS3IBMCOSStorageRequest" + "$ref": "#/definitions/storage.createS3ExabaStorageRequest" } } ], @@ -4033,7 +4033,7 @@ const docTemplate = `{ } } }, - "/storage/s3/idrive": { + "/storage/s3/filelu": { "post": { "consumes": [ "application/json" @@ -4044,8 +4044,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with IDrive - IDrive e2", - "operationId": "CreateS3IDriveStorage", + "summary": "Create S3 storage with FileLu - FileLu S5 (S3-Compatible Object Storage)", + "operationId": "CreateS3FileLuStorage", "parameters": [ { "description": "Request body", @@ -4053,7 +4053,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3IDriveStorageRequest" + "$ref": "#/definitions/storage.createS3FileLuStorageRequest" } } ], @@ -4079,7 +4079,7 @@ const docTemplate = `{ } } }, - "/storage/s3/ionos": { + "/storage/s3/flashblade": { "post": { "consumes": [ "application/json" @@ -4090,8 +4090,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with IONOS - IONOS Cloud", - "operationId": "CreateS3IONOSStorage", + "summary": "Create S3 storage with FlashBlade - Pure Storage FlashBlade Object Storage", + "operationId": "CreateS3FlashBladeStorage", "parameters": [ { "description": "Request body", @@ -4099,7 +4099,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3IONOSStorageRequest" + "$ref": "#/definitions/storage.createS3FlashBladeStorageRequest" } } ], @@ -4125,7 +4125,7 @@ const docTemplate = `{ } } }, - "/storage/s3/leviia": { + "/storage/s3/gcs": { "post": { "consumes": [ "application/json" @@ -4136,8 +4136,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with Leviia - Leviia Object Storage", - "operationId": "CreateS3LeviiaStorage", + "summary": "Create S3 storage with GCS - Google Cloud Storage", + 
"operationId": "CreateS3GCSStorage", "parameters": [ { "description": "Request body", @@ -4145,7 +4145,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3LeviiaStorageRequest" + "$ref": "#/definitions/storage.createS3GCSStorageRequest" } } ], @@ -4171,7 +4171,7 @@ const docTemplate = `{ } } }, - "/storage/s3/liara": { + "/storage/s3/hetzner": { "post": { "consumes": [ "application/json" @@ -4182,8 +4182,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with Liara - Liara Object Storage", - "operationId": "CreateS3LiaraStorage", + "summary": "Create S3 storage with Hetzner - Hetzner Object Storage", + "operationId": "CreateS3HetznerStorage", "parameters": [ { "description": "Request body", @@ -4191,7 +4191,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3LiaraStorageRequest" + "$ref": "#/definitions/storage.createS3HetznerStorageRequest" } } ], @@ -4217,7 +4217,7 @@ const docTemplate = `{ } } }, - "/storage/s3/linode": { + "/storage/s3/huaweiobs": { "post": { "consumes": [ "application/json" @@ -4228,8 +4228,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with Linode - Linode Object Storage", - "operationId": "CreateS3LinodeStorage", + "summary": "Create S3 storage with HuaweiOBS - Huawei Object Storage Service", + "operationId": "CreateS3HuaweiOBSStorage", "parameters": [ { "description": "Request body", @@ -4237,7 +4237,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3LinodeStorageRequest" + "$ref": "#/definitions/storage.createS3HuaweiOBSStorageRequest" } } ], @@ -4263,7 +4263,7 @@ const docTemplate = `{ } } }, - "/storage/s3/lyvecloud": { + "/storage/s3/ibmcos": { "post": { "consumes": [ "application/json" @@ -4274,8 +4274,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with 
LyveCloud - Seagate Lyve Cloud", - "operationId": "CreateS3LyveCloudStorage", + "summary": "Create S3 storage with IBMCOS - IBM COS S3", + "operationId": "CreateS3IBMCOSStorage", "parameters": [ { "description": "Request body", @@ -4283,7 +4283,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3LyveCloudStorageRequest" + "$ref": "#/definitions/storage.createS3IBMCOSStorageRequest" } } ], @@ -4309,7 +4309,7 @@ const docTemplate = `{ } } }, - "/storage/s3/magalu": { + "/storage/s3/idrive": { "post": { "consumes": [ "application/json" @@ -4320,8 +4320,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with Magalu - Magalu Object Storage", - "operationId": "CreateS3MagaluStorage", + "summary": "Create S3 storage with IDrive - IDrive e2", + "operationId": "CreateS3IDriveStorage", "parameters": [ { "description": "Request body", @@ -4329,7 +4329,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3MagaluStorageRequest" + "$ref": "#/definitions/storage.createS3IDriveStorageRequest" } } ], @@ -4355,7 +4355,7 @@ const docTemplate = `{ } } }, - "/storage/s3/minio": { + "/storage/s3/intercolo": { "post": { "consumes": [ "application/json" @@ -4366,8 +4366,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with Minio - Minio Object Storage", - "operationId": "CreateS3MinioStorage", + "summary": "Create S3 storage with Intercolo - Intercolo Object Storage", + "operationId": "CreateS3IntercoloStorage", "parameters": [ { "description": "Request body", @@ -4375,7 +4375,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3MinioStorageRequest" + "$ref": "#/definitions/storage.createS3IntercoloStorageRequest" } } ], @@ -4401,7 +4401,7 @@ const docTemplate = `{ } } }, - "/storage/s3/netease": { + "/storage/s3/ionos": { "post": { "consumes": [ 
"application/json" @@ -4412,8 +4412,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with Netease - Netease Object Storage (NOS)", - "operationId": "CreateS3NeteaseStorage", + "summary": "Create S3 storage with IONOS - IONOS Cloud", + "operationId": "CreateS3IONOSStorage", "parameters": [ { "description": "Request body", @@ -4421,7 +4421,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3NeteaseStorageRequest" + "$ref": "#/definitions/storage.createS3IONOSStorageRequest" } } ], @@ -4447,7 +4447,7 @@ const docTemplate = `{ } } }, - "/storage/s3/other": { + "/storage/s3/leviia": { "post": { "consumes": [ "application/json" @@ -4458,8 +4458,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with Other - Any other S3 compatible provider", - "operationId": "CreateS3OtherStorage", + "summary": "Create S3 storage with Leviia - Leviia Object Storage", + "operationId": "CreateS3LeviiaStorage", "parameters": [ { "description": "Request body", @@ -4467,7 +4467,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3OtherStorageRequest" + "$ref": "#/definitions/storage.createS3LeviiaStorageRequest" } } ], @@ -4493,7 +4493,7 @@ const docTemplate = `{ } } }, - "/storage/s3/petabox": { + "/storage/s3/liara": { "post": { "consumes": [ "application/json" @@ -4504,8 +4504,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with Petabox - Petabox Object Storage", - "operationId": "CreateS3PetaboxStorage", + "summary": "Create S3 storage with Liara - Liara Object Storage", + "operationId": "CreateS3LiaraStorage", "parameters": [ { "description": "Request body", @@ -4513,7 +4513,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3PetaboxStorageRequest" + "$ref": "#/definitions/storage.createS3LiaraStorageRequest" 
} } ], @@ -4539,7 +4539,7 @@ const docTemplate = `{ } } }, - "/storage/s3/qiniu": { + "/storage/s3/linode": { "post": { "consumes": [ "application/json" @@ -4550,8 +4550,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with Qiniu - Qiniu Object Storage (Kodo)", - "operationId": "CreateS3QiniuStorage", + "summary": "Create S3 storage with Linode - Linode Object Storage", + "operationId": "CreateS3LinodeStorage", "parameters": [ { "description": "Request body", @@ -4559,7 +4559,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3QiniuStorageRequest" + "$ref": "#/definitions/storage.createS3LinodeStorageRequest" } } ], @@ -4585,7 +4585,7 @@ const docTemplate = `{ } } }, - "/storage/s3/rackcorp": { + "/storage/s3/lyvecloud": { "post": { "consumes": [ "application/json" @@ -4596,8 +4596,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with RackCorp - RackCorp Object Storage", - "operationId": "CreateS3RackCorpStorage", + "summary": "Create S3 storage with LyveCloud - Seagate Lyve Cloud", + "operationId": "CreateS3LyveCloudStorage", "parameters": [ { "description": "Request body", @@ -4605,7 +4605,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3RackCorpStorageRequest" + "$ref": "#/definitions/storage.createS3LyveCloudStorageRequest" } } ], @@ -4631,7 +4631,7 @@ const docTemplate = `{ } } }, - "/storage/s3/rclone": { + "/storage/s3/magalu": { "post": { "consumes": [ "application/json" @@ -4642,8 +4642,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with Rclone - Rclone S3 Server", - "operationId": "CreateS3RcloneStorage", + "summary": "Create S3 storage with Magalu - Magalu Object Storage", + "operationId": "CreateS3MagaluStorage", "parameters": [ { "description": "Request body", @@ -4651,7 +4651,7 @@ const docTemplate = `{ "in": "body", 
"required": true, "schema": { - "$ref": "#/definitions/storage.createS3RcloneStorageRequest" + "$ref": "#/definitions/storage.createS3MagaluStorageRequest" } } ], @@ -4677,7 +4677,7 @@ const docTemplate = `{ } } }, - "/storage/s3/scaleway": { + "/storage/s3/mega": { "post": { "consumes": [ "application/json" @@ -4688,8 +4688,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with Scaleway - Scaleway Object Storage", - "operationId": "CreateS3ScalewayStorage", + "summary": "Create S3 storage with Mega - MEGA S4 Object Storage", + "operationId": "CreateS3MegaStorage", "parameters": [ { "description": "Request body", @@ -4697,7 +4697,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3ScalewayStorageRequest" + "$ref": "#/definitions/storage.createS3MegaStorageRequest" } } ], @@ -4723,7 +4723,7 @@ const docTemplate = `{ } } }, - "/storage/s3/seaweedfs": { + "/storage/s3/minio": { "post": { "consumes": [ "application/json" @@ -4734,8 +4734,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with SeaweedFS - SeaweedFS S3", - "operationId": "CreateS3SeaweedFSStorage", + "summary": "Create S3 storage with Minio - Minio Object Storage", + "operationId": "CreateS3MinioStorage", "parameters": [ { "description": "Request body", @@ -4743,7 +4743,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3SeaweedFSStorageRequest" + "$ref": "#/definitions/storage.createS3MinioStorageRequest" } } ], @@ -4769,7 +4769,7 @@ const docTemplate = `{ } } }, - "/storage/s3/stackpath": { + "/storage/s3/netease": { "post": { "consumes": [ "application/json" @@ -4780,8 +4780,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with StackPath - StackPath Object Storage", - "operationId": "CreateS3StackPathStorage", + "summary": "Create S3 storage with Netease - Netease Object Storage 
(NOS)", + "operationId": "CreateS3NeteaseStorage", "parameters": [ { "description": "Request body", @@ -4789,7 +4789,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3StackPathStorageRequest" + "$ref": "#/definitions/storage.createS3NeteaseStorageRequest" } } ], @@ -4815,7 +4815,7 @@ const docTemplate = `{ } } }, - "/storage/s3/storj": { + "/storage/s3/other": { "post": { "consumes": [ "application/json" @@ -4826,8 +4826,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with Storj - Storj (S3 Compatible Gateway)", - "operationId": "CreateS3StorjStorage", + "summary": "Create S3 storage with Other - Any other S3 compatible provider", + "operationId": "CreateS3OtherStorage", "parameters": [ { "description": "Request body", @@ -4835,7 +4835,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3StorjStorageRequest" + "$ref": "#/definitions/storage.createS3OtherStorageRequest" } } ], @@ -4861,7 +4861,7 @@ const docTemplate = `{ } } }, - "/storage/s3/synology": { + "/storage/s3/outscale": { "post": { "consumes": [ "application/json" @@ -4872,8 +4872,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with Synology - Synology C2 Object Storage", - "operationId": "CreateS3SynologyStorage", + "summary": "Create S3 storage with Outscale - OUTSCALE Object Storage (OOS)", + "operationId": "CreateS3OutscaleStorage", "parameters": [ { "description": "Request body", @@ -4881,7 +4881,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3SynologyStorageRequest" + "$ref": "#/definitions/storage.createS3OutscaleStorageRequest" } } ], @@ -4907,7 +4907,7 @@ const docTemplate = `{ } } }, - "/storage/s3/tencentcos": { + "/storage/s3/ovhcloud": { "post": { "consumes": [ "application/json" @@ -4918,8 +4918,8 @@ const docTemplate = `{ "tags": [ 
"Storage" ], - "summary": "Create S3 storage with TencentCOS - Tencent Cloud Object Storage (COS)", - "operationId": "CreateS3TencentCOSStorage", + "summary": "Create S3 storage with OVHcloud - OVHcloud Object Storage", + "operationId": "CreateS3OVHcloudStorage", "parameters": [ { "description": "Request body", @@ -4927,7 +4927,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3TencentCOSStorageRequest" + "$ref": "#/definitions/storage.createS3OVHcloudStorageRequest" } } ], @@ -4953,7 +4953,7 @@ const docTemplate = `{ } } }, - "/storage/s3/wasabi": { + "/storage/s3/petabox": { "post": { "consumes": [ "application/json" @@ -4964,8 +4964,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with Wasabi - Wasabi Object Storage", - "operationId": "CreateS3WasabiStorage", + "summary": "Create S3 storage with Petabox - Petabox Object Storage", + "operationId": "CreateS3PetaboxStorage", "parameters": [ { "description": "Request body", @@ -4973,7 +4973,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3WasabiStorageRequest" + "$ref": "#/definitions/storage.createS3PetaboxStorageRequest" } } ], @@ -4999,7 +4999,7 @@ const docTemplate = `{ } } }, - "/storage/seafile": { + "/storage/s3/qiniu": { "post": { "consumes": [ "application/json" @@ -5010,8 +5010,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create Seafile storage", - "operationId": "CreateSeafileStorage", + "summary": "Create S3 storage with Qiniu - Qiniu Object Storage (Kodo)", + "operationId": "CreateS3QiniuStorage", "parameters": [ { "description": "Request body", @@ -5019,7 +5019,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createSeafileStorageRequest" + "$ref": "#/definitions/storage.createS3QiniuStorageRequest" } } ], @@ -5045,7 +5045,7 @@ const docTemplate = `{ } } }, - 
"/storage/sftp": { + "/storage/s3/rabata": { "post": { "consumes": [ "application/json" @@ -5056,8 +5056,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create Sftp storage", - "operationId": "CreateSftpStorage", + "summary": "Create S3 storage with Rabata - Rabata Cloud Storage", + "operationId": "CreateS3RabataStorage", "parameters": [ { "description": "Request body", @@ -5065,7 +5065,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createSftpStorageRequest" + "$ref": "#/definitions/storage.createS3RabataStorageRequest" } } ], @@ -5091,7 +5091,7 @@ const docTemplate = `{ } } }, - "/storage/sharefile": { + "/storage/s3/rackcorp": { "post": { "consumes": [ "application/json" @@ -5102,8 +5102,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create Sharefile storage", - "operationId": "CreateSharefileStorage", + "summary": "Create S3 storage with RackCorp - RackCorp Object Storage", + "operationId": "CreateS3RackCorpStorage", "parameters": [ { "description": "Request body", @@ -5111,7 +5111,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createSharefileStorageRequest" + "$ref": "#/definitions/storage.createS3RackCorpStorageRequest" } } ], @@ -5137,7 +5137,7 @@ const docTemplate = `{ } } }, - "/storage/sia": { + "/storage/s3/rclone": { "post": { "consumes": [ "application/json" @@ -5148,8 +5148,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create Sia storage", - "operationId": "CreateSiaStorage", + "summary": "Create S3 storage with Rclone - Rclone S3 Server", + "operationId": "CreateS3RcloneStorage", "parameters": [ { "description": "Request body", @@ -5157,7 +5157,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createSiaStorageRequest" + "$ref": "#/definitions/storage.createS3RcloneStorageRequest" } } ], @@ -5183,7 +5183,7 @@ const docTemplate = `{ 
} } }, - "/storage/smb": { + "/storage/s3/scaleway": { "post": { "consumes": [ "application/json" @@ -5194,8 +5194,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create Smb storage", - "operationId": "CreateSmbStorage", + "summary": "Create S3 storage with Scaleway - Scaleway Object Storage", + "operationId": "CreateS3ScalewayStorage", "parameters": [ { "description": "Request body", @@ -5203,7 +5203,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createSmbStorageRequest" + "$ref": "#/definitions/storage.createS3ScalewayStorageRequest" } } ], @@ -5229,7 +5229,7 @@ const docTemplate = `{ } } }, - "/storage/storj/existing": { + "/storage/s3/seaweedfs": { "post": { "consumes": [ "application/json" @@ -5240,8 +5240,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create Storj storage with existing - Use an existing access grant.", - "operationId": "CreateStorjExistingStorage", + "summary": "Create S3 storage with SeaweedFS - SeaweedFS S3", + "operationId": "CreateS3SeaweedFSStorage", "parameters": [ { "description": "Request body", @@ -5249,7 +5249,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createStorjExistingStorageRequest" + "$ref": "#/definitions/storage.createS3SeaweedFSStorageRequest" } } ], @@ -5275,7 +5275,7 @@ const docTemplate = `{ } } }, - "/storage/storj/new": { + "/storage/s3/selectel": { "post": { "consumes": [ "application/json" @@ -5286,8 +5286,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create Storj storage with new - Create a new access grant from satellite address, API key, and passphrase.", - "operationId": "CreateStorjNewStorage", + "summary": "Create S3 storage with Selectel - Selectel Object Storage", + "operationId": "CreateS3SelectelStorage", "parameters": [ { "description": "Request body", @@ -5295,7 +5295,7 @@ const docTemplate = `{ "in": "body", "required": true, 
"schema": { - "$ref": "#/definitions/storage.createStorjNewStorageRequest" + "$ref": "#/definitions/storage.createS3SelectelStorageRequest" } } ], @@ -5321,7 +5321,7 @@ const docTemplate = `{ } } }, - "/storage/sugarsync": { + "/storage/s3/servercore": { "post": { "consumes": [ "application/json" @@ -5332,8 +5332,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create Sugarsync storage", - "operationId": "CreateSugarsyncStorage", + "summary": "Create S3 storage with Servercore - Servercore Object Storage", + "operationId": "CreateS3ServercoreStorage", "parameters": [ { "description": "Request body", @@ -5341,7 +5341,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createSugarsyncStorageRequest" + "$ref": "#/definitions/storage.createS3ServercoreStorageRequest" } } ], @@ -5367,7 +5367,7 @@ const docTemplate = `{ } } }, - "/storage/swift": { + "/storage/s3/spectralogic": { "post": { "consumes": [ "application/json" @@ -5378,8 +5378,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create Swift storage", - "operationId": "CreateSwiftStorage", + "summary": "Create S3 storage with SpectraLogic - Spectra Logic Black Pearl", + "operationId": "CreateS3SpectraLogicStorage", "parameters": [ { "description": "Request body", @@ -5387,7 +5387,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createSwiftStorageRequest" + "$ref": "#/definitions/storage.createS3SpectraLogicStorageRequest" } } ], @@ -5413,7 +5413,7 @@ const docTemplate = `{ } } }, - "/storage/union": { + "/storage/s3/stackpath": { "post": { "consumes": [ "application/json" @@ -5424,8 +5424,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create Union storage", - "operationId": "CreateUnionStorage", + "summary": "Create S3 storage with StackPath - StackPath Object Storage", + "operationId": "CreateS3StackPathStorage", "parameters": [ { "description": "Request 
body", @@ -5433,7 +5433,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createUnionStorageRequest" + "$ref": "#/definitions/storage.createS3StackPathStorageRequest" } } ], @@ -5459,7 +5459,7 @@ const docTemplate = `{ } } }, - "/storage/uptobox": { + "/storage/s3/storj": { "post": { "consumes": [ "application/json" @@ -5470,8 +5470,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create Uptobox storage", - "operationId": "CreateUptoboxStorage", + "summary": "Create S3 storage with Storj - Storj (S3 Compatible Gateway)", + "operationId": "CreateS3StorjStorage", "parameters": [ { "description": "Request body", @@ -5479,7 +5479,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createUptoboxStorageRequest" + "$ref": "#/definitions/storage.createS3StorjStorageRequest" } } ], @@ -5505,7 +5505,7 @@ const docTemplate = `{ } } }, - "/storage/webdav": { + "/storage/s3/synology": { "post": { "consumes": [ "application/json" @@ -5516,8 +5516,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create Webdav storage", - "operationId": "CreateWebdavStorage", + "summary": "Create S3 storage with Synology - Synology C2 Object Storage", + "operationId": "CreateS3SynologyStorage", "parameters": [ { "description": "Request body", @@ -5525,7 +5525,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createWebdavStorageRequest" + "$ref": "#/definitions/storage.createS3SynologyStorageRequest" } } ], @@ -5551,7 +5551,7 @@ const docTemplate = `{ } } }, - "/storage/yandex": { + "/storage/s3/tencentcos": { "post": { "consumes": [ "application/json" @@ -5562,8 +5562,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create Yandex storage", - "operationId": "CreateYandexStorage", + "summary": "Create S3 storage with TencentCOS - Tencent Cloud Object Storage (COS)", + "operationId": 
"CreateS3TencentCOSStorage", "parameters": [ { "description": "Request body", @@ -5571,7 +5571,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createYandexStorageRequest" + "$ref": "#/definitions/storage.createS3TencentCOSStorageRequest" } } ], @@ -5597,7 +5597,7 @@ const docTemplate = `{ } } }, - "/storage/zoho": { + "/storage/s3/wasabi": { "post": { "consumes": [ "application/json" @@ -5608,8 +5608,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create Zoho storage", - "operationId": "CreateZohoStorage", + "summary": "Create S3 storage with Wasabi - Wasabi Object Storage", + "operationId": "CreateS3WasabiStorage", "parameters": [ { "description": "Request body", @@ -5617,7 +5617,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createZohoStorageRequest" + "$ref": "#/definitions/storage.createS3WasabiStorageRequest" } } ], @@ -5643,25 +5643,36 @@ const docTemplate = `{ } } }, - "/storage/{name}": { - "delete": { + "/storage/s3/zata": { + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], "tags": [ "Storage" ], - "summary": "Remove a storage", - "operationId": "RemoveStorage", + "summary": "Create S3 storage with Zata - Zata (S3 compatible Gateway)", + "operationId": "CreateS3ZataStorage", "parameters": [ { - "type": "string", - "description": "Storage ID or name", - "name": "name", - "in": "path", - "required": true + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.createS3ZataStorageRequest" + } } ], "responses": { - "204": { - "description": "No Content" + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Storage" + } }, "400": { "description": "Bad Request", @@ -5676,8 +5687,10 @@ const docTemplate = `{ } } } - }, - "patch": { + } + }, + "/storage/seafile": { + "post": { 
"consumes": [ "application/json" ], @@ -5687,26 +5700,16 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Update a storage connection", - "operationId": "UpdateStorage", + "summary": "Create Seafile storage", + "operationId": "CreateSeafileStorage", "parameters": [ { - "type": "string", - "description": "Storage ID or name", - "name": "name", - "in": "path", - "required": true - }, - { - "description": "Configuration", - "name": "config", + "description": "Request body", + "name": "request", "in": "body", "required": true, "schema": { - "type": "object", - "additionalProperties": { - "type": "string" - } + "$ref": "#/definitions/storage.createSeafileStorageRequest" } } ], @@ -5732,8 +5735,8 @@ const docTemplate = `{ } } }, - "/storage/{name}/explore/{path}": { - "get": { + "/storage/sftp": { + "post": { "consumes": [ "application/json" ], @@ -5743,32 +5746,24 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Explore directory entries in a storage system", - "operationId": "ExploreStorage", + "summary": "Create Sftp storage", + "operationId": "CreateSftpStorage", "parameters": [ { - "type": "string", - "description": "Storage ID or name", - "name": "name", - "in": "path", - "required": true - }, - { - "type": "string", - "description": "Path in the storage system to explore", - "name": "path", - "in": "path", - "required": true + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.createSftpStorageRequest" + } } ], "responses": { "200": { "description": "OK", "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/storage.DirEntry" - } + "$ref": "#/definitions/model.Storage" } }, "400": { @@ -5786,8 +5781,8 @@ const docTemplate = `{ } } }, - "/storage/{name}/rename": { - "patch": { + "/storage/sharefile": { + "post": { "consumes": [ "application/json" ], @@ -5797,23 +5792,62 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Rename a 
storage connection", - "operationId": "RenameStorage", + "summary": "Create Sharefile storage", + "operationId": "CreateSharefileStorage", "parameters": [ { - "type": "string", - "description": "Storage ID or name", - "name": "name", - "in": "path", - "required": true + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.createSharefileStorageRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Storage" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + }, + "/storage/sia": { + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Storage" + ], + "summary": "Create Sia storage", + "operationId": "CreateSiaStorage", + "parameters": [ { - "description": "New storage name", + "description": "Request body", "name": "request", "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.RenameRequest" + "$ref": "#/definitions/storage.createSiaStorageRequest" } } ], @@ -5839,24 +5873,35 @@ const docTemplate = `{ } } }, - "/wallet": { - "get": { + "/storage/smb": { + "post": { + "consumes": [ + "application/json" + ], "produces": [ "application/json" ], "tags": [ - "Wallet" + "Storage" + ], + "summary": "Create Smb storage", + "operationId": "CreateSmbStorage", + "parameters": [ + { + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.createSmbStorageRequest" + } + } ], - "summary": "List all imported wallets", - "operationId": "ListWallets", "responses": { "200": { "description": "OK", "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/model.Wallet" - } + "$ref": 
"#/definitions/model.Storage" } }, "400": { @@ -5872,7 +5917,9 @@ const docTemplate = `{ } } } - }, + } + }, + "/storage/storj/existing": { "post": { "consumes": [ "application/json" @@ -5881,10 +5928,10 @@ const docTemplate = `{ "application/json" ], "tags": [ - "Wallet" + "Storage" ], - "summary": "Import a private key", - "operationId": "ImportWallet", + "summary": "Create Storj storage with existing - Use an existing access grant.", + "operationId": "CreateStorjExistingStorage", "parameters": [ { "description": "Request body", @@ -5892,7 +5939,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/wallet.ImportKeystoreRequest" + "$ref": "#/definitions/storage.createStorjExistingStorageRequest" } } ], @@ -5900,7 +5947,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/model.Wallet" + "$ref": "#/definitions/model.Storage" } }, "400": { @@ -5918,25 +5965,36 @@ const docTemplate = `{ } } }, - "/wallet/{address}": { - "delete": { + "/storage/storj/new": { + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], "tags": [ - "Wallet" + "Storage" ], - "summary": "Remove a wallet", - "operationId": "RemoveWallet", + "summary": "Create Storj storage with new - Create a new access grant from satellite address, API key, and passphrase.", + "operationId": "CreateStorjNewStorage", "parameters": [ { - "type": "string", - "description": "Address", - "name": "address", - "in": "path", - "required": true + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.createStorjNewStorageRequest" + } } ], "responses": { - "204": { - "description": "No Content" + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Storage" + } }, "400": { "description": "Bad Request", @@ -5952,1476 +6010,7478 @@ const docTemplate = `{ } } } - } - }, + }, + "/storage/sugarsync": 
{ + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Storage" + ], + "summary": "Create Sugarsync storage", + "operationId": "CreateSugarsyncStorage", + "parameters": [ + { + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.createSugarsyncStorageRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Storage" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + }, + "/storage/swift": { + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Storage" + ], + "summary": "Create Swift storage", + "operationId": "CreateSwiftStorage", + "parameters": [ + { + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.createSwiftStorageRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Storage" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + }, + "/storage/union": { + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Storage" + ], + "summary": "Create Union storage", + "operationId": "CreateUnionStorage", + "parameters": [ + { + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.createUnionStorageRequest" + } + } + ], + "responses": { + "200": { + "description": 
"OK", + "schema": { + "$ref": "#/definitions/model.Storage" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + }, + "/storage/webdav": { + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Storage" + ], + "summary": "Create Webdav storage", + "operationId": "CreateWebdavStorage", + "parameters": [ + { + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.createWebdavStorageRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Storage" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + }, + "/storage/yandex": { + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Storage" + ], + "summary": "Create Yandex storage", + "operationId": "CreateYandexStorage", + "parameters": [ + { + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.createYandexStorageRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Storage" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + }, + "/storage/zoho": { + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Storage" + ], + 
"summary": "Create Zoho storage", + "operationId": "CreateZohoStorage", + "parameters": [ + { + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.createZohoStorageRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Storage" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + }, + "/storage/{name}": { + "delete": { + "tags": [ + "Storage" + ], + "summary": "Remove a storage", + "operationId": "RemoveStorage", + "parameters": [ + { + "type": "string", + "description": "Storage ID or name", + "name": "name", + "in": "path", + "required": true + } + ], + "responses": { + "204": { + "description": "No Content" + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + }, + "patch": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Storage" + ], + "summary": "Update a storage connection", + "operationId": "UpdateStorage", + "parameters": [ + { + "type": "string", + "description": "Storage ID or name", + "name": "name", + "in": "path", + "required": true + }, + { + "description": "Configuration", + "name": "config", + "in": "body", + "required": true, + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Storage" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + 
"schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + }, + "/storage/{name}/explore/{path}": { + "get": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Storage" + ], + "summary": "Explore directory entries in a storage system", + "operationId": "ExploreStorage", + "parameters": [ + { + "type": "string", + "description": "Storage ID or name", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Path in the storage system to explore", + "name": "path", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/storage.DirEntry" + } + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + }, + "/storage/{name}/rename": { + "patch": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Storage" + ], + "summary": "Rename a storage connection", + "operationId": "RenameStorage", + "parameters": [ + { + "type": "string", + "description": "Storage ID or name", + "name": "name", + "in": "path", + "required": true + }, + { + "description": "New storage name", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.RenameRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Storage" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + }, + "/wallet": { + "get": { + "produces": [ + "application/json" + ], + "tags": [ + "Wallet" + ], + 
"summary": "List all imported wallets", + "operationId": "ListWallets", + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/model.Wallet" + } + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + }, + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Wallet" + ], + "summary": "Import a private key", + "operationId": "ImportWallet", + "parameters": [ + { + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/wallet.ImportKeystoreRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Wallet" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + }, + "/wallet/{address}": { + "delete": { + "tags": [ + "Wallet" + ], + "summary": "Remove a wallet", + "operationId": "RemoveWallet", + "parameters": [ + { + "type": "string", + "description": "Address", + "name": "address", + "in": "path", + "required": true + } + ], + "responses": { + "204": { + "description": "No Content" + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + } + }, "definitions": { "admin.SetIdentityRequest": { "type": "object", "properties": { - "identity": { + "identity": { + "type": "string" + } + } + }, + "api.HTTPError": { + "type": "object", + "properties": { + "err": { + "type": "string" + } + } + 
}, + "dataprep.AddPieceRequest": { + "type": "object", + "required": [ + "pieceCid" + ], + "properties": { + "fileSize": { + "description": "File size of the CAR file, this is required for boost online deal", + "type": "integer" + }, + "pieceCid": { + "description": "CID of the piece", + "type": "string" + }, + "pieceSize": { + "description": "Size of the piece (required for external import, optional if piece exists in DB)", + "type": "string" + }, + "rootCid": { + "description": "Root CID of the CAR file, used to populate the label field of storage deal", + "type": "string" + } + } + }, + "dataprep.CreateRequest": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "deleteAfterExport": { + "description": "Whether to delete the source files after export", + "type": "boolean", + "default": false + }, + "maxSize": { + "description": "Maximum size of the CAR files to be created", + "type": "string", + "default": "31.5GiB" + }, + "minPieceSize": { + "description": "Minimum piece size for the preparation, applies only to DAG and remainer pieces", + "type": "string", + "default": "1MiB" + }, + "name": { + "description": "Name of the preparation", + "type": "string" + }, + "noDag": { + "description": "Whether to disable maintaining folder dag structure for the sources. If disabled, DagGen will not be possible and folders will not have an associated CID.", + "type": "boolean", + "default": false + }, + "noInline": { + "description": "Whether to disable inline storage for the preparation. 
Can save database space but requires at least one output storage.", + "type": "boolean", + "default": false + }, + "outputStorages": { + "description": "Name of Output storage systems to be used for the output", + "type": "array", + "items": { + "type": "string" + } + }, + "pieceSize": { + "description": "Target piece size of the CAR files used for piece commitment calculation", + "type": "string" + }, + "sourceStorages": { + "description": "Name of Source storage systems to be used for the source", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "dataprep.DeletePieceRequest": { + "type": "object", + "properties": { + "deleteCar": { + "description": "Delete the physical CAR file from storage (default: true)", + "type": "boolean" + }, + "force": { + "description": "Delete even if deals reference this piece", + "type": "boolean" + } + } + }, + "dataprep.DirEntry": { + "type": "object", + "properties": { + "cid": { + "type": "string" + }, + "fileVersions": { + "type": "array", + "items": { + "$ref": "#/definitions/dataprep.Version" + } + }, + "isDir": { + "type": "boolean" + }, + "path": { + "type": "string" + } + } + }, + "dataprep.ExploreResult": { + "type": "object", + "properties": { + "cid": { + "type": "string" + }, + "path": { + "type": "string" + }, + "subEntries": { + "type": "array", + "items": { + "$ref": "#/definitions/dataprep.DirEntry" + } + } + } + }, + "dataprep.PieceList": { + "type": "object", + "properties": { + "attachmentId": { + "type": "integer" + }, + "pieces": { + "type": "array", + "items": { + "$ref": "#/definitions/model.Car" + } + }, + "source": { + "$ref": "#/definitions/model.Storage" + }, + "storageId": { + "type": "integer" + } + } + }, + "dataprep.RemoveRequest": { + "type": "object", + "properties": { + "removeCars": { + "type": "boolean" + } + } + }, + "dataprep.RenameRequest": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + } + } + }, + "dataprep.Version": 
{ + "type": "object", + "properties": { + "cid": { + "type": "string" + }, + "hash": { + "type": "string" + }, + "id": { + "type": "integer" + }, + "lastModified": { + "type": "string" + }, + "size": { + "type": "integer" + } + } + }, + "deal.ListDealRequest": { + "type": "object", + "properties": { + "dealTypes": { + "description": "deal type filter (market for f05, pdp for f41)", + "type": "array", + "items": { + "$ref": "#/definitions/model.DealType" + } + }, + "preparations": { + "description": "preparation ID or name filter", + "type": "array", + "items": { + "type": "string" + } + }, + "providers": { + "description": "provider filter", + "type": "array", + "items": { + "type": "string" + } + }, + "schedules": { + "description": "schedule id filter", + "type": "array", + "items": { + "type": "integer" + } + }, + "sources": { + "description": "source ID or name filter", + "type": "array", + "items": { + "type": "string" + } + }, + "states": { + "description": "state filter", + "type": "array", + "items": { + "$ref": "#/definitions/model.DealState" + } + } + } + }, + "deal.Proposal": { + "type": "object", + "properties": { + "clientAddress": { + "description": "Client address", + "type": "string" + }, + "duration": { + "description": "Duration in epoch or in duration format, i.e. 1500000, 2400h", + "type": "string", + "default": "12740h" + }, + "fileSize": { + "description": "File size in bytes for boost to fetch the CAR file", + "type": "integer" + }, + "httpHeaders": { + "description": "http headers to be passed with the request (i.e. 
key=value)", + "type": "array", + "items": { + "type": "string" + } + }, + "ipni": { + "description": "Whether the deal should be IPNI", + "type": "boolean", + "default": true + }, + "keepUnsealed": { + "description": "Whether the deal should be kept unsealed", + "type": "boolean", + "default": true + }, + "pieceCid": { + "description": "Piece CID", + "type": "string" + }, + "pieceSize": { + "description": "Piece size", + "type": "string" + }, + "pricePerDeal": { + "description": "Price in FIL per deal", + "type": "number", + "default": 0 + }, + "pricePerGb": { + "description": "Price in FIL per GiB", + "type": "number", + "default": 0 + }, + "pricePerGbEpoch": { + "description": "Price in FIL per GiB per epoch", + "type": "number", + "default": 0 + }, + "providerId": { + "description": "Provider ID", + "type": "string" + }, + "rootCid": { + "description": "Root CID that is required as part of the deal proposal, if empty, will be set to empty CID", + "type": "string", + "default": "bafkqaaa" + }, + "startDelay": { + "description": "Deal start delay in epoch or in duration format, i.e. 1000, 72h", + "type": "string", + "default": "72h" + }, + "urlTemplate": { + "description": "URL template with PIECE_CID placeholder for boost to fetch the CAR file, i.e. 
http://127.0.0.1/piece/{PIECE_CID}.car", + "type": "string" + }, + "verified": { + "description": "Whether the deal should be verified", + "type": "boolean", + "default": true + } + } + }, + "file.DealsForFileRange": { + "type": "object", + "properties": { + "deals": { + "type": "array", + "items": { + "$ref": "#/definitions/model.Deal" + } + }, + "fileRange": { + "$ref": "#/definitions/model.FileRange" + } + } + }, + "file.Info": { + "type": "object", + "properties": { + "path": { + "description": "Path to the new file, relative to the source", + "type": "string" + } + } + }, + "job.SourceStatus": { + "type": "object", + "properties": { + "attachmentId": { + "type": "integer" + }, + "jobs": { + "type": "array", + "items": { + "$ref": "#/definitions/model.Job" + } + }, + "output": { + "type": "array", + "items": { + "$ref": "#/definitions/model.Storage" + } + }, + "source": { + "$ref": "#/definitions/model.Storage" + }, + "storageId": { + "type": "integer" + } + } + }, + "model.Car": { + "type": "object", + "properties": { + "attachmentId": { + "type": "integer" + }, + "createdAt": { + "type": "string" + }, + "fileSize": { + "type": "integer" + }, + "id": { + "type": "integer" + }, + "jobId": { + "type": "integer" + }, + "minPieceSizePadding": { + "description": "MinPieceSizePadding tracks virtual padding for inline mode only. Inline: stores padding amount, PieceReader serves zeros virtually. 
Non-inline: always 0, literal zeros are written to CAR file for Curio TreeD compatibility.", + "type": "integer" + }, + "numOfFiles": { + "type": "integer" + }, + "pieceCid": { + "type": "string" + }, + "pieceSize": { + "type": "integer" + }, + "pieceType": { + "description": "PieceType indicates whether this is a data piece or DAG piece", + "type": "string" + }, + "preparationId": { + "description": "Association - SET NULL for fast prep deletion, async cleanup", + "type": "integer" + }, + "rootCid": { + "type": "string" + }, + "storageId": { + "type": "integer" + }, + "storagePath": { + "description": "StoragePath is the path to the CAR file inside the storage. If the StorageID is nil but StoragePath is not empty, it means the CAR file is stored at the local absolute path.", + "type": "string" + } + } + }, + "model.ClientConfig": { + "type": "object", + "properties": { + "caCert": { + "description": "Paths to CA certificate used to verify servers", + "type": "array", + "items": { + "type": "string" + } + }, + "clientCert": { + "description": "Path to Client SSL certificate (PEM) for mutual TLS auth", + "type": "string" + }, + "clientKey": { + "description": "Path to Client SSL private key (PEM) for mutual TLS auth", + "type": "string" + }, + "connectTimeout": { + "description": "HTTP Client Connect timeout", + "type": "integer" + }, + "disableHttp2": { + "description": "Disable HTTP/2 in the transport", + "type": "boolean" + }, + "disableHttpKeepAlives": { + "description": "Disable HTTP keep-alives and use each connection once.", + "type": "boolean" + }, + "expectContinueTimeout": { + "description": "Timeout when using expect / 100-continue in HTTP", + "type": "integer" + }, + "headers": { + "description": "Set HTTP header for all transactions", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "insecureSkipVerify": { + "description": "Do not verify the server SSL certificate (insecure)", + "type": "boolean" + }, + "lowlevelRetries": { 
+ "description": "Maximum number of retries for low-level client errors. Default is 10 retries.", + "type": "integer" + }, + "noGzip": { + "description": "Don't set Accept-Encoding: gzip", + "type": "boolean" + }, + "retryBackoff": { + "description": "Constant backoff between retries. Default is 1s.", + "type": "integer" + }, + "retryBackoffExponential": { + "description": "Exponential backoff between retries. Default is 1.0.", + "type": "number" + }, + "retryDelay": { + "description": "Delay between retries. Default is 1s.", + "type": "integer" + }, + "retryMaxCount": { + "description": "Maximum number of retries. Default is 10 retries.", + "type": "integer" + }, + "scanConcurrency": { + "description": "Maximum number of concurrent scan requests. Default is 1.", + "type": "integer" + }, + "skipInaccessibleFile": { + "description": "Skip inaccessible files. Default is false.", + "type": "boolean" + }, + "timeout": { + "description": "IO idle timeout", + "type": "integer" + }, + "useServerModTime": { + "description": "Use server modified time instead of object metadata", + "type": "boolean" + }, + "userAgent": { + "description": "Set the user-agent to a specified string", + "type": "string" + } + } + }, + "model.ConfigMap": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "model.Deal": { + "type": "object", + "properties": { + "clientId": { + "type": "string" + }, + "createdAt": { + "type": "string" + }, + "dealId": { + "type": "integer" + }, + "dealType": { + "$ref": "#/definitions/model.DealType" + }, + "endEpoch": { + "type": "integer" + }, + "errorMessage": { + "type": "string" + }, + "id": { + "type": "integer" + }, + "label": { + "type": "string" + }, + "lastVerifiedAt": { + "description": "LastVerifiedAt is the last time the deal was verified as active by the tracker", + "type": "string" + }, + "nextChallengeEpoch": { + "description": "NextChallengeEpoch is the next epoch when a challenge proof is due", + "type": "integer" + }, 
+ "pieceCid": { + "type": "string" + }, + "pieceSize": { + "type": "integer" + }, + "price": { + "type": "string" + }, + "proofSetId": { + "description": "PDP-specific fields (only populated for DealTypePDP)", + "type": "integer" + }, + "proofSetLive": { + "description": "ProofSetLive indicates if the proof set is live (actively being challenged)", + "type": "boolean" + }, + "proposalId": { + "type": "string" + }, + "provider": { + "type": "string" + }, + "scheduleId": { + "description": "Associations", + "type": "integer" + }, + "sectorStartEpoch": { + "type": "integer" + }, + "startEpoch": { + "type": "integer" + }, + "state": { + "$ref": "#/definitions/model.DealState" + }, + "updatedAt": { + "type": "string" + }, + "verified": { + "type": "boolean" + }, + "walletId": { + "type": "integer" + } + } + }, + "model.DealState": { + "type": "string", + "enum": [ + "proposed", + "published", + "active", + "expired", + "proposal_expired", + "rejected", + "slashed", + "error" + ], + "x-enum-varnames": [ + "DealProposed", + "DealPublished", + "DealActive", + "DealExpired", + "DealProposalExpired", + "DealRejected", + "DealSlashed", + "DealErrored" + ] + }, + "model.DealType": { + "type": "string", + "enum": [ + "market", + "pdp" + ], + "x-enum-varnames": [ + "DealTypeMarket", + "DealTypePDP" + ] + }, + "model.File": { + "type": "object", + "properties": { + "attachmentId": { + "description": "Associations - AttachmentID SET NULL for fast prep deletion, async cleanup", + "type": "integer" + }, + "cid": { + "description": "CID is the CID of the file.", + "type": "string" + }, + "directoryId": { + "type": "integer" + }, + "fileRanges": { + "type": "array", + "items": { + "$ref": "#/definitions/model.FileRange" + } + }, + "hash": { + "description": "Hash is the hash of the file.", + "type": "string" + }, + "id": { + "type": "integer" + }, + "lastModifiedNano": { + "type": "integer" + }, + "path": { + "description": "Path is the relative path to the file inside the storage.", 
+ "type": "string" + }, + "size": { + "description": "Size is the size of the file in bytes.", + "type": "integer" + } + } + }, + "model.FileRange": { + "type": "object", + "properties": { + "cid": { + "description": "CID is the CID of the range.", + "type": "string" + }, + "fileId": { + "type": "integer" + }, + "id": { + "type": "integer" + }, + "jobId": { + "description": "Associations", + "type": "integer" + }, + "length": { + "description": "Length is the length of the range in bytes.", + "type": "integer" + }, + "offset": { + "description": "Offset is the offset of the range inside the file.", + "type": "integer" + } + } + }, + "model.Job": { + "type": "object", + "properties": { + "attachmentId": { + "type": "integer" + }, + "errorMessage": { + "type": "string" + }, + "errorStackTrace": { + "type": "string" + }, + "id": { + "type": "integer" + }, + "state": { + "$ref": "#/definitions/model.JobState" + }, + "type": { + "$ref": "#/definitions/model.JobType" + }, + "workerId": { + "description": "Associations - AttachmentID SET NULL for fast prep deletion, async cleanup", + "type": "string" + } + } + }, + "model.JobState": { + "type": "string", + "enum": [ + "created", + "ready", + "paused", + "processing", + "complete", + "error" + ], + "x-enum-varnames": [ + "Created", + "Ready", + "Paused", + "Processing", + "Complete", + "Error" + ] + }, + "model.JobType": { + "type": "string", + "enum": [ + "scan", + "pack", + "daggen" + ], + "x-enum-varnames": [ + "Scan", + "Pack", + "DagGen" + ] + }, + "model.Preparation": { + "type": "object", + "properties": { + "createdAt": { + "type": "string" + }, + "deleteAfterExport": { + "description": "DeleteAfterExport is a flag that indicates whether the source files should be deleted after export.", + "type": "boolean" + }, + "id": { + "type": "integer" + }, + "maxSize": { + "type": "integer" + }, + "minPieceSize": { + "description": "Minimum piece size for the preparation, applies only to DAG and remainder pieces", + "type": 
"integer" + }, + "name": { + "type": "string" + }, + "noDag": { + "type": "boolean" + }, + "noInline": { + "type": "boolean" + }, + "outputStorages": { + "type": "array", + "items": { + "$ref": "#/definitions/model.Storage" + } + }, + "pieceSize": { + "type": "integer" + }, + "sourceStorages": { + "type": "array", + "items": { + "$ref": "#/definitions/model.Storage" + } + }, + "updatedAt": { + "type": "string" + }, + "walletId": { + "description": "Associations", + "type": "integer" + } + } + }, + "model.Schedule": { + "type": "object", + "properties": { + "allowedPieceCids": { + "type": "array", + "items": { + "type": "string" + } + }, + "announceToIpni": { + "type": "boolean" + }, + "createdAt": { + "type": "string" + }, + "dealType": { + "$ref": "#/definitions/model.DealType" + }, + "duration": { + "type": "integer" + }, + "errorMessage": { + "type": "string" + }, + "force": { + "type": "boolean" + }, + "httpHeaders": { + "$ref": "#/definitions/model.ConfigMap" + }, + "id": { + "type": "integer" + }, + "keepUnsealed": { + "type": "boolean" + }, + "maxPendingDealNumber": { + "type": "integer" + }, + "maxPendingDealSize": { + "type": "integer" + }, + "notes": { + "type": "string" + }, + "preparationId": { + "description": "Associations", + "type": "integer" + }, + "pricePerDeal": { + "type": "number" + }, + "pricePerGb": { + "type": "number" + }, + "pricePerGbEpoch": { + "type": "number" + }, + "provider": { + "type": "string" + }, + "scheduleCron": { + "type": "string" + }, + "scheduleCronPerpetual": { + "type": "boolean" + }, + "scheduleDealNumber": { + "type": "integer" + }, + "scheduleDealSize": { + "type": "integer" + }, + "startDelay": { + "type": "integer" + }, + "state": { + "$ref": "#/definitions/model.ScheduleState" + }, + "totalDealNumber": { + "type": "integer" + }, + "totalDealSize": { + "type": "integer" + }, + "updatedAt": { + "type": "string" + }, + "urlTemplate": { + "type": "string" + }, + "verified": { + "type": "boolean" + } + } + }, + 
"model.ScheduleState": { + "type": "string", + "enum": [ + "active", + "paused", + "error", + "completed" + ], + "x-enum-varnames": [ + "ScheduleActive", + "SchedulePaused", + "ScheduleError", + "ScheduleCompleted" + ] + }, + "model.Storage": { + "type": "object", + "properties": { + "clientConfig": { + "description": "ClientConfig is the HTTP configuration for the storage, if applicable.", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "Config is a map of key-value pairs that can be used to store RClone options.", + "allOf": [ + { + "$ref": "#/definitions/model.ConfigMap" + } + ] + }, + "createdAt": { + "type": "string" + }, + "id": { + "type": "integer" + }, + "name": { + "type": "string" + }, + "path": { + "description": "Path is the path to the storage root.", + "type": "string" + }, + "preparationsAsOutput": { + "type": "array", + "items": { + "$ref": "#/definitions/model.Preparation" + } + }, + "preparationsAsSource": { + "description": "Associations", + "type": "array", + "items": { + "$ref": "#/definitions/model.Preparation" + } + }, + "type": { + "type": "string" + }, + "updatedAt": { + "type": "string" + } + } + }, + "model.Wallet": { + "type": "object", + "properties": { + "actorId": { + "description": "nullable, links to on-chain actor f0...", + "type": "string" + }, + "address": { + "description": "filecoin address (f1.../f3...)", + "type": "string" + }, + "id": { + "type": "integer" + }, + "keyPath": { + "description": "absolute path to key file", + "type": "string" + }, + "keyStore": { + "description": "local, yubikey, aws-kms, etc", + "type": "string" + }, + "name": { + "description": "optional label", + "type": "string" + } + } + }, + "schedule.CreateRequest": { + "type": "object", + "properties": { + "allowedPieceCids": { + "description": "Allowed piece CIDs in this schedule", + "type": "array", + "items": { + "type": "string" + } + }, + "dealType": { + "description": "Deal type: market 
(f05) or pdp (f41)", + "type": "string" + }, + "duration": { + "description": "Duration in epoch or in duration format, i.e. 1500000, 2400h", + "type": "string", + "default": "12840h" + }, + "force": { + "description": "Force to send out deals regardless of replication restriction", + "type": "boolean" + }, + "httpHeaders": { + "description": "http headers to be passed with the request (i.e. key=value)", + "type": "array", + "items": { + "type": "string" + } + }, + "ipni": { + "description": "Whether the deal should be IPNI", + "type": "boolean", + "default": true + }, + "keepUnsealed": { + "description": "Whether the deal should be kept unsealed", + "type": "boolean", + "default": true + }, + "maxPendingDealNumber": { + "description": "Max pending deal number", + "type": "integer" + }, + "maxPendingDealSize": { + "description": "Max pending deal size in human readable format, i.e. 100 TiB", + "type": "string" + }, + "notes": { + "description": "Notes", + "type": "string" + }, + "preparation": { + "description": "Preparation ID or name", + "type": "string" + }, + "pricePerDeal": { + "description": "Price in FIL per deal", + "type": "number", + "default": 0 + }, + "pricePerGb": { + "description": "Price in FIL per GiB", + "type": "number", + "default": 0 + }, + "pricePerGbEpoch": { + "description": "Price in FIL per GiB per epoch", + "type": "number", + "default": 0 + }, + "provider": { + "description": "Provider", + "type": "string" + }, + "scheduleCron": { + "description": "Schedule cron pattern", + "type": "string" + }, + "scheduleCronPerpetual": { + "description": "Whether a cron schedule should run in definitely", + "type": "boolean" + }, + "scheduleDealNumber": { + "description": "Number of deals per scheduled time", + "type": "integer" + }, + "scheduleDealSize": { + "description": "Size of deals per schedule trigger in human readable format, i.e. 
100 TiB", + "type": "string" + }, + "startDelay": { + "description": "Deal start delay in epoch or in duration format, i.e. 1000, 72h", + "type": "string", + "default": "72h" + }, + "totalDealNumber": { + "description": "Total number of deals", + "type": "integer" + }, + "totalDealSize": { + "description": "Total size of deals in human readable format, i.e. 100 TiB", + "type": "string" + }, + "urlTemplate": { + "description": "URL template with PIECE_CID placeholder for boost to fetch the CAR file, i.e. http://127.0.0.1/piece/{PIECE_CID}.car", + "type": "string" + }, + "verified": { + "description": "Whether the deal should be verified", + "type": "boolean", + "default": true + } + } + }, + "schedule.UpdateRequest": { + "type": "object", + "properties": { + "allowedPieceCids": { + "description": "Allowed piece CIDs in this schedule", + "type": "array", + "items": { + "type": "string" + } + }, + "dealType": { + "description": "Deal type: market (f05) or pdp (f41)", + "type": "string" + }, + "duration": { + "description": "Duration in epoch or in duration format, i.e. 1500000, 2400h", + "type": "string", + "default": "12840h" + }, + "force": { + "description": "Force to send out deals regardless of replication restriction", + "type": "boolean" + }, + "httpHeaders": { + "description": "http headers to be passed with the request (i.e. key=value)", + "type": "array", + "items": { + "type": "string" + } + }, + "ipni": { + "description": "Whether the deal should be IPNI", + "type": "boolean", + "default": true + }, + "keepUnsealed": { + "description": "Whether the deal should be kept unsealed", + "type": "boolean", + "default": true + }, + "maxPendingDealNumber": { + "description": "Max pending deal number", + "type": "integer" + }, + "maxPendingDealSize": { + "description": "Max pending deal size in human readable format, i.e. 
100 TiB", + "type": "string" + }, + "notes": { + "description": "Notes", + "type": "string" + }, + "pricePerDeal": { + "description": "Price in FIL per deal", + "type": "number", + "default": 0 + }, + "pricePerGb": { + "description": "Price in FIL per GiB", + "type": "number", + "default": 0 + }, + "pricePerGbEpoch": { + "description": "Price in FIL per GiB per epoch", + "type": "number", + "default": 0 + }, + "scheduleCron": { + "description": "Schedule cron pattern", + "type": "string" + }, + "scheduleCronPerpetual": { + "description": "Whether a cron schedule should run in definitely", + "type": "boolean" + }, + "scheduleDealNumber": { + "description": "Number of deals per scheduled time", + "type": "integer" + }, + "scheduleDealSize": { + "description": "Size of deals per schedule trigger in human readable format, i.e. 100 TiB", + "type": "string" + }, + "startDelay": { + "description": "Deal start delay in epoch or in duration format, i.e. 1000, 72h", + "type": "string", + "default": "72h" + }, + "totalDealNumber": { + "description": "Total number of deals", + "type": "integer" + }, + "totalDealSize": { + "description": "Total size of deals in human readable format, i.e. 100 TiB", + "type": "string" + }, + "urlTemplate": { + "description": "URL template with PIECE_CID placeholder for boost to fetch the CAR file, i.e. 
http://127.0.0.1/piece/{PIECE_CID}.car", + "type": "string" + }, + "verified": { + "description": "Whether the deal should be verified", + "type": "boolean", + "default": true + } + } + }, + "storage.DirEntry": { + "type": "object", + "properties": { + "dirId": { + "type": "string" + }, + "hash": { + "type": "string" + }, + "isDir": { + "type": "boolean" + }, + "lastModified": { + "type": "string" + }, + "numItems": { + "type": "integer" + }, + "path": { + "type": "string" + }, + "size": { + "type": "integer" + } + } + }, + "storage.RenameRequest": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + } + } + }, + "storage.azureblobConfig": { + "type": "object", + "properties": { + "accessTier": { + "description": "Access tier of blob: hot, cool, cold or archive.", + "type": "string" + }, + "account": { + "description": "Azure Storage Account Name.", + "type": "string" + }, + "archiveTierDelete": { + "description": "Delete archive tier blobs before overwriting.", + "type": "boolean", + "default": false + }, + "chunkSize": { + "description": "Upload chunk size.", + "type": "string", + "default": "4Mi" + }, + "clientCertificatePassword": { + "description": "Password for the certificate file (optional).", + "type": "string" + }, + "clientCertificatePath": { + "description": "Path to a PEM or PKCS12 certificate file including the private key.", + "type": "string" + }, + "clientId": { + "description": "The ID of the client in use.", + "type": "string" + }, + "clientSecret": { + "description": "One of the service principal's client secrets", + "type": "string" + }, + "clientSendCertificateChain": { + "description": "Send the certificate chain when using certificate auth.", + "type": "boolean", + "default": false + }, + "connectionString": { + "description": "Storage Connection String.", + "type": "string" + }, + "copyConcurrency": { + "description": "Concurrency for multipart copy.", + "type": "integer", + "default": 512 + 
}, + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "8Mi" + }, + "deleteSnapshots": { + "description": "Set to specify how to deal with snapshots on blob deletion.", + "type": "string", + "example": "" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false + }, + "disableInstanceDiscovery": { + "description": "Skip requesting Microsoft Entra instance metadata", + "type": "boolean", + "default": false + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8" + }, + "endpoint": { + "description": "Endpoint for the service.", + "type": "string" + }, + "envAuth": { + "description": "Read credentials from runtime (environment variables, CLI or MSI).", + "type": "boolean", + "default": false + }, + "key": { + "description": "Storage Account Shared Key.", + "type": "string" + }, + "listChunk": { + "description": "Size of blob list.", + "type": "integer", + "default": 5000 + }, + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", + "type": "string", + "default": "1m0s" + }, + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", + "type": "boolean", + "default": false + }, + "msiClientId": { + "description": "Object ID of the user-assigned MSI to use, if any.", + "type": "string" + }, + "msiMiResId": { + "description": "Azure resource ID of the user-assigned MSI to use, if any.", + "type": "string" + }, + "msiObjectId": { + "description": "Object ID of the user-assigned MSI to use, if any.", + "type": "string" + }, + "noCheckContainer": { + "description": "If set, don't attempt to check the container exists or create it.", + "type": "boolean", + "default": false + }, + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false + }, + "password": { + "description": "The user's password", + "type": "string" + }, + "publicAccess": { + "description": "Public access level of a container: blob or container.", + "type": "string", + "example": "" + }, + "sasUrl": { + "description": "SAS URL for container level access only.", + "type": "string" + }, + "servicePrincipalFile": { + "description": "Path to file containing credentials for use with a service principal.", + "type": "string" + }, + "tenant": { + "description": "ID of the service principal's tenant. 
Also called its directory ID.", + "type": "string" + }, + "uploadConcurrency": { + "description": "Concurrency for multipart uploads.", + "type": "integer", + "default": 16 + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload (\u003c= 256 MiB) (deprecated).", + "type": "string" + }, + "useAz": { + "description": "Use Azure CLI tool az for authentication", + "type": "boolean", + "default": false + }, + "useCopyBlob": { + "description": "Whether to use the Copy Blob API when copying to the same storage account.", + "type": "boolean", + "default": true + }, + "useEmulator": { + "description": "Uses local storage emulator if provided as 'true'.", + "type": "boolean", + "default": false + }, + "useMsi": { + "description": "Use a managed service identity to authenticate (only works in Azure).", + "type": "boolean", + "default": false + }, + "username": { + "description": "User name (usually an email address)", + "type": "string" + } + } + }, + "storage.b2Config": { + "type": "object", + "properties": { + "account": { + "description": "Account ID or Application Key ID.", + "type": "string" + }, + "chunkSize": { + "description": "Upload chunk size.", + "type": "string", + "default": "96Mi" + }, + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4Gi" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "disableChecksum": { + "description": "Disable checksums for large (\u003e upload cutoff) files.", + "type": "boolean", + "default": false + }, + "downloadAuthDuration": { + "description": "Time before the public link authorization token will expire in s or suffix ms|s|m|h|d.", + "type": "string", + "default": "1w" + }, + "downloadUrl": { + "description": "Custom endpoint for downloads.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": 
"Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" + }, + "endpoint": { + "description": "Endpoint for the service.", + "type": "string" + }, + "hardDelete": { + "description": "Permanently delete files on remote removal, otherwise hide files.", + "type": "boolean", + "default": false + }, + "key": { + "description": "Application Key.", + "type": "string" + }, + "lifecycle": { + "description": "Set the number of days deleted files should be kept when creating a bucket.", + "type": "integer", + "default": 0 + }, + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", + "type": "string", + "default": "1m0s" + }, + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", + "type": "boolean", + "default": false + }, + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in B2.", + "type": "string", + "example": "" + }, + "sseCustomerKey": { + "description": "To use SSE-C, you may provide the secret encryption key encoded in a UTF-8 compatible string to encrypt/decrypt your data", + "type": "string", + "example": "" + }, + "sseCustomerKeyBase64": { + "description": "To use SSE-C, you may provide the secret encryption key encoded in Base64 format to encrypt/decrypt your data", + "type": "string", + "example": "" + }, + "sseCustomerKeyMd5": { + "description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", + "type": "string", + "example": "" + }, + "testMode": { + "description": "A flag string for X-Bz-Test-Mode header for debugging.", + "type": "string" + }, + "uploadConcurrency": { + "description": "Concurrency for multipart uploads.", + "type": "integer", + "default": 4 + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" + }, + "versionAt": { + "description": "Show file versions 
as they were at the specified time.", + "type": "string", + "default": "off" + }, + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false + } + } + }, + "storage.boxConfig": { + "type": "object", + "properties": { + "accessToken": { + "description": "Box App Primary Access Token", + "type": "string" + }, + "authUrl": { + "description": "Auth server URL.", + "type": "string" + }, + "boxConfigFile": { + "description": "Box App config.json location", + "type": "string" + }, + "boxSubType": { + "type": "string", + "default": "user", + "example": "user" + }, + "clientCredentials": { + "description": "Use client credentials OAuth flow.", + "type": "boolean", + "default": false + }, + "clientId": { + "description": "OAuth Client Id.", + "type": "string" + }, + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" + }, + "commitRetries": { + "description": "Max number of times to try committing a multipart file.", + "type": "integer", + "default": 100 + }, + "configCredentials": { + "description": "Box App config.json contents.", + "type": "string" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot" + }, + "impersonate": { + "description": "Impersonate this user ID when using a service account.", + "type": "string" + }, + "listChunk": { + "description": "Size of listing chunk 1-1000.", + "type": "integer", + "default": 1000 + }, + "ownedBy": { + "description": "Only show items owned by the login (email address) passed in.", + "type": "string" + }, + "rootFolderId": { + "description": "Fill in for rclone to use a non root folder as its starting point.", + "type": "string", + "default": "0" + }, + "token": { + "description": "OAuth Access Token as a JSON blob.", + "type": "string" + }, + 
"tokenUrl": { + "description": "Token server url.", + "type": "string" + }, + "uploadCutoff": { + "description": "Cutoff for switching to multipart upload (\u003e= 50 MiB).", + "type": "string", + "default": "50Mi" + } + } + }, + "storage.createAzureblobStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.azureblobConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createB2StorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.b2Config" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createBoxStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.boxConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createDriveStorageRequest": { + "type": "object", + "properties": { + 
"clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.driveConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createDropboxStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.dropboxConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createFichierStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.fichierConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createFilefabricStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.filefabricConfig" + } + ] + }, + "name": 
{ + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createFtpStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.ftpConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createGcsStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.gcsConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createGphotosStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.gphotosConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createHdfsStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": 
"config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.hdfsConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createHidriveStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.hidriveConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createHttpStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.httpConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createInternetarchiveStorageRequest": { + "type": "object" + }, + "storage.createJottacloudStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": 
"#/definitions/storage.jottacloudConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createKoofrDigistorageStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.koofrDigistorageConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createKoofrKoofrStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.koofrKoofrConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createKoofrOtherStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.koofrOtherConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + 
"storage.createLocalStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.localConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createMailruStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.mailruConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createMegaStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.megaConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createNetstorageStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + 
"$ref": "#/definitions/storage.netstorageConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createOnedriveStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.onedriveConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createOosEnv_authStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.oosEnv_authConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createOosInstance_principal_authStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.oosInstance_principal_authConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": 
"string" + } + } + }, + "storage.createOosNo_authStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.oosNo_authConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createOosResource_principal_authStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.oosResource_principal_authConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createOosUser_principal_authStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.oosUser_principal_authConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createOosWorkload_identity_authStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": 
"#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.oosWorkload_identity_authConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createOpendriveStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.opendriveConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createPcloudStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.pcloudConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createPremiumizemeStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.premiumizemeConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": 
"string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createPutioStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.putioConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createQingstorStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.qingstorConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3AWSStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3AWSConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3AlibabaStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + 
"$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3AlibabaConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3ArvanCloudStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3ArvanCloudConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3BizflyCloudStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3BizflyCloudConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3CephStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3CephConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": 
"string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3ChinaMobileStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3ChinaMobileConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3CloudflareStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3CloudflareConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3CubbitStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3CubbitConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3DigitalOceanStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for 
underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3DigitalOceanConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3DreamhostStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3DreamhostConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3ExabaStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3ExabaConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3FileLuStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3FileLuConfig" + } + ] + }, + "name": { + "description": "Name of the 
storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3FlashBladeStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3FlashBladeConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3GCSStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3GCSConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3HetznerStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3HetznerConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3HuaweiOBSStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": 
"config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3HuaweiOBSConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3IBMCOSStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3IBMCOSConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3IDriveStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3IDriveConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3IONOSStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3IONOSConfig" + } + ] + }, + "name": { + "description": "Name of 
the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3IntercoloStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3IntercoloConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3LeviiaStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3LeviiaConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3LiaraStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3LiaraConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3LinodeStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": 
"config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3LinodeConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3LyveCloudStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3LyveCloudConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3MagaluStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3MagaluConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3MegaStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3MegaConfig" + } + ] + }, + "name": { + "description": "Name of 
the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3MinioStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3MinioConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3NeteaseStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3NeteaseConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3OVHcloudStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3OVHcloudConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3OtherStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": 
"config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3OtherConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3OutscaleStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3OutscaleConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3PetaboxStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3PetaboxConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3QiniuStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3QiniuConfig" + } + ] + }, + "name": { + "description": "Name of 
the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3RabataStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3RabataConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3RackCorpStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3RackCorpConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3RcloneStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3RcloneConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3ScalewayStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": 
"config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3ScalewayConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3SeaweedFSStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3SeaweedFSConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3SelectelStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3SelectelConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3ServercoreStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3ServercoreConfig" + } + ] + }, + "name": { + 
"description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3SpectraLogicStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3SpectraLogicConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3StackPathStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3StackPathConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3StorjStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3StorjConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3SynologyStorageRequest": { + "type": "object", + "properties": { 
+ "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3SynologyConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3TencentCOSStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3TencentCOSConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3WasabiStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3WasabiConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3ZataStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3ZataConfig" + } + ] + 
}, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createSeafileStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.seafileConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createSftpStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.sftpConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createSharefileStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.sharefileConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createSiaStorageRequest": { + "type": "object", + "properties": { + 
"clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.siaConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createSmbStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.smbConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createStorjExistingStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.storjExistingConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createStorjNewStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.storjNewConfig" + } + ] + }, + "name": { 
+ "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createSugarsyncStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.sugarsyncConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createSwiftStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.swiftConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createUnionStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.unionConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createWebdavStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + 
"description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.webdavConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createYandexStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.yandexConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createZohoStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.zohoConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.driveConfig": { + "type": "object", + "properties": { + "acknowledgeAbuse": { + "description": "Set to allow files which return cannotDownloadAbusiveFile to be downloaded.", + "type": "boolean", + "default": false + }, + "allowImportNameChange": { + "description": "Allow the filetype to change when uploading Google docs.", + "type": "boolean", + "default": false + }, + "alternateExport": { + 
"description": "Deprecated: No longer needed.", + "type": "boolean", + "default": false + }, + "authOwnerOnly": { + "description": "Only consider files owned by the authenticated user.", + "type": "boolean", + "default": false + }, + "authUrl": { + "description": "Auth server URL.", + "type": "string" + }, + "chunkSize": { + "description": "Upload chunk size.", + "type": "string", + "default": "8Mi" + }, + "clientCredentials": { + "description": "Use client credentials OAuth flow.", + "type": "boolean", + "default": false + }, + "clientId": { + "description": "Google Application Client Id", + "type": "string" + }, + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" + }, + "copyShortcutContent": { + "description": "Server side copy contents of shortcuts instead of the shortcut.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "disableHttp2": { + "description": "Disable drive using http2.", + "type": "boolean", + "default": true + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "InvalidUtf8" + }, + "envAuth": { + "description": "Get IAM credentials from runtime (environment variables or instance meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false + }, + "exportFormats": { + "description": "Comma separated list of preferred formats for downloading Google docs.", + "type": "string", + "default": "docx,xlsx,pptx,svg" + }, + "fastListBugFix": { + "description": "Work around a bug in Google Drive listing.", + "type": "boolean", + "default": true + }, + "formats": { + "description": "Deprecated: See export_formats.", + "type": "string" + }, + "impersonate": { + "description": "Impersonate this user when using a service account.", + "type": "string" + }, + "importFormats": { + "description": "Comma separated list of preferred formats for uploading Google docs.", + 
"type": "string" + }, + "keepRevisionForever": { + "description": "Keep new head revision of each file forever.", + "type": "boolean", + "default": false + }, + "listChunk": { + "description": "Size of listing chunk 100-1000, 0 to disable.", + "type": "integer", + "default": 1000 + }, + "metadataEnforceExpansiveAccess": { + "description": "Whether the request should enforce expansive access rules.", + "type": "boolean", + "default": false + }, + "metadataLabels": { + "description": "Control whether labels should be read or written in metadata.", + "type": "string", + "default": "off", + "example": "off" + }, + "metadataOwner": { + "description": "Control whether owner should be read or written in metadata.", + "type": "string", + "default": "read", + "example": "off" + }, + "metadataPermissions": { + "description": "Control whether permissions should be read or written in metadata.", + "type": "string", + "default": "off", + "example": "off" + }, + "pacerBurst": { + "description": "Number of API calls to allow without sleeping.", + "type": "integer", + "default": 100 + }, + "pacerMinSleep": { + "description": "Minimum time to sleep between API calls.", + "type": "string", + "default": "100ms" + }, + "resourceKey": { + "description": "Resource key for accessing a link-shared file.", + "type": "string" + }, + "rootFolderId": { + "description": "ID of the root folder.", + "type": "string" + }, + "scope": { + "description": "Comma separated list of scopes that rclone should use when requesting access from drive.", + "type": "string", + "example": "drive" + }, + "serverSideAcrossConfigs": { + "description": "Deprecated: use --server-side-across-configs instead.", + "type": "boolean", + "default": false + }, + "serviceAccountCredentials": { + "description": "Service Account Credentials JSON blob.", + "type": "string" + }, + "serviceAccountFile": { + "description": "Service Account Credentials JSON file path.", + "type": "string" + }, + "sharedWithMe": { + "description": 
"Only show files that are shared with me.", + "type": "boolean", + "default": false + }, + "showAllGdocs": { + "description": "Show all Google Docs including non-exportable ones in listings.", + "type": "boolean", + "default": false + }, + "sizeAsQuota": { + "description": "Show sizes as storage quota usage, not actual size.", + "type": "boolean", + "default": false + }, + "skipChecksumGphotos": { + "description": "Skip checksums on Google photos and videos only.", + "type": "boolean", + "default": false + }, + "skipDanglingShortcuts": { + "description": "If set skip dangling shortcut files.", + "type": "boolean", + "default": false + }, + "skipGdocs": { + "description": "Skip google documents in all listings.", + "type": "boolean", + "default": false + }, + "skipShortcuts": { + "description": "If set skip shortcut files.", + "type": "boolean", + "default": false + }, + "starredOnly": { + "description": "Only show files that are starred.", + "type": "boolean", + "default": false + }, + "stopOnDownloadLimit": { + "description": "Make download limit errors be fatal.", + "type": "boolean", + "default": false + }, + "stopOnUploadLimit": { + "description": "Make upload limit errors be fatal.", + "type": "boolean", + "default": false + }, + "teamDrive": { + "description": "ID of the Shared Drive (Team Drive).", + "type": "string" + }, + "token": { + "description": "OAuth Access Token as a JSON blob.", + "type": "string" + }, + "tokenUrl": { + "description": "Token server url.", + "type": "string" + }, + "trashedOnly": { + "description": "Only show files that are in the trash.", + "type": "boolean", + "default": false + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "8Mi" + }, + "useCreatedDate": { + "description": "Use file created date instead of modified date.", + "type": "boolean", + "default": false + }, + "useSharedDate": { + "description": "Use date file was shared instead of modified date.", + 
"type": "boolean", + "default": false + }, + "useTrash": { + "description": "Send files to the trash instead of deleting permanently.", + "type": "boolean", + "default": true + }, + "v2DownloadMinSize": { + "description": "If Object's are greater, use drive v2 API to download.", + "type": "string", + "default": "off" + } + } + }, + "storage.dropboxConfig": { + "type": "object", + "properties": { + "authUrl": { + "description": "Auth server URL.", + "type": "string" + }, + "batchCommitTimeout": { + "description": "Max time to wait for a batch to finish committing. (no longer used)", + "type": "string", + "default": "10m0s" + }, + "batchMode": { + "description": "Upload file batching sync|async|off.", + "type": "string", + "default": "sync" + }, + "batchSize": { + "description": "Max number of files in upload batch.", + "type": "integer", + "default": 0 + }, + "batchTimeout": { + "description": "Max time to allow an idle upload batch before uploading.", + "type": "string", + "default": "0s" + }, + "chunkSize": { + "description": "Upload chunk size (\u003c 150Mi).", + "type": "string", + "default": "48Mi" + }, + "clientCredentials": { + "description": "Use client credentials OAuth flow.", + "type": "boolean", + "default": false + }, + "clientId": { + "description": "OAuth Client Id.", + "type": "string" + }, + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot" + }, + "exportFormats": { + "description": "Comma separated list of preferred formats for exporting files", + "type": "string", + "default": "html,md" + }, + "impersonate": { + "description": "Impersonate this user when using a business account.", + "type": "string" + }, + "pacerMinSleep": { + "description": "Minimum time to sleep between API 
calls.", + "type": "string", + "default": "10ms" + }, + "rootNamespace": { + "description": "Specify a different Dropbox namespace ID to use as the root for all paths.", + "type": "string" + }, + "sharedFiles": { + "description": "Instructs rclone to work on individual shared files.", + "type": "boolean", + "default": false + }, + "sharedFolders": { + "description": "Instructs rclone to work on shared folders.", + "type": "boolean", + "default": false + }, + "showAllExports": { + "description": "Show all exportable files in listings.", + "type": "boolean", + "default": false + }, + "skipExports": { + "description": "Skip exportable files in all listings.", + "type": "boolean", + "default": false + }, + "token": { + "description": "OAuth Access Token as a JSON blob.", + "type": "string" + }, + "tokenUrl": { + "description": "Token server url.", "type": "string" } } }, - "api.HTTPError": { + "storage.fichierConfig": { "type": "object", "properties": { - "err": { + "apiKey": { + "description": "Your API Key, get it from https://1fichier.com/console/params.pl.", + "type": "string" + }, + "cdn": { + "description": "Set if you wish to use CDN download links.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot" + }, + "filePassword": { + "description": "If you want to download a shared file that is password protected, add this parameter.", + "type": "string" + }, + "folderPassword": { + "description": "If you want to list the files in a shared folder that is password protected, add this parameter.", + "type": "string" + }, + "sharedFolder": { + "description": "If you want to download a shared folder, add this parameter.", + "type": "string" + } + } + }, + "storage.filefabricConfig": { 
+ "type": "object", + "properties": { + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,Del,Ctl,InvalidUtf8,Dot" + }, + "permanentToken": { + "description": "Permanent Authentication Token.", + "type": "string" + }, + "rootFolderId": { + "description": "ID of the root folder.", + "type": "string" + }, + "token": { + "description": "Session Token.", + "type": "string" + }, + "tokenExpiry": { + "description": "Token expiry time.", + "type": "string" + }, + "url": { + "description": "URL of the Enterprise File Fabric to connect to.", + "type": "string", + "example": "https://storagemadeeasy.com" + }, + "version": { + "description": "Version read from the file fabric.", + "type": "string" + } + } + }, + "storage.ftpConfig": { + "type": "object", + "properties": { + "allowInsecureTlsCiphers": { + "description": "Allow insecure TLS ciphers", + "type": "boolean", + "default": false + }, + "askPassword": { + "description": "Allow asking for FTP password when needed.", + "type": "boolean", + "default": false + }, + "closeTimeout": { + "description": "Maximum time to wait for a response to close.", + "type": "string", + "default": "1m0s" + }, + "concurrency": { + "description": "Maximum number of FTP simultaneous connections, 0 for unlimited.", + "type": "integer", + "default": 0 + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "disableEpsv": { + "description": "Disable using EPSV even if server advertises support.", + "type": "boolean", + "default": false + }, + "disableMlsd": { + "description": "Disable using MLSD even if server advertises support.", + "type": "boolean", + "default": false + }, + "disableTls13": { + "description": "Disable TLS 1.3 (workaround for FTP servers with buggy TLS)", + "type": "boolean", + "default": false + }, + "disableUtf8": { + "description": "Disable 
using UTF-8 even if server advertises support.", + "type": "boolean", + "default": false + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,Del,Ctl,RightSpace,Dot", + "example": "Asterisk,Ctl,Dot,Slash" + }, + "explicitTls": { + "description": "Use Explicit FTPS (FTP over TLS).", + "type": "boolean", + "default": false + }, + "forceListHidden": { + "description": "Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD.", + "type": "boolean", + "default": false + }, + "host": { + "description": "FTP host to connect to.", + "type": "string" + }, + "httpProxy": { + "description": "URL for HTTP CONNECT proxy", + "type": "string" + }, + "idleTimeout": { + "description": "Max time before closing idle connections.", + "type": "string", + "default": "1m0s" + }, + "noCheckCertificate": { + "description": "Do not verify the TLS certificate of the server.", + "type": "boolean", + "default": false + }, + "noCheckUpload": { + "description": "Don't check the upload is OK", + "type": "boolean", + "default": false + }, + "pass": { + "description": "FTP password.", + "type": "string" + }, + "port": { + "description": "FTP port number.", + "type": "integer", + "default": 21 + }, + "shutTimeout": { + "description": "Maximum time to wait for data connection closing status.", + "type": "string", + "default": "1m0s" + }, + "socksProxy": { + "description": "Socks 5 proxy host.", "type": "string" + }, + "tls": { + "description": "Use Implicit FTPS (FTP over TLS).", + "type": "boolean", + "default": false + }, + "tlsCacheSize": { + "description": "Size of TLS session cache for all control and data connections.", + "type": "integer", + "default": 32 + }, + "user": { + "description": "FTP username.", + "type": "string", + "default": "$USER" + }, + "writingMdtm": { + "description": "Use MDTM to set modification time (VsFtpd quirk)", + "type": "boolean", + "default": false } } }, - 
"dataprep.AddPieceRequest": { + "storage.gcsConfig": { "type": "object", - "required": [ - "pieceCid" - ], "properties": { - "fileSize": { - "description": "File size of the CAR file, this is required for boost online deal", - "type": "integer" + "accessToken": { + "description": "Short-lived access token.", + "type": "string" }, - "pieceCid": { - "description": "CID of the piece", + "anonymous": { + "description": "Access public buckets and objects without credentials.", + "type": "boolean", + "default": false + }, + "authUrl": { + "description": "Auth server URL.", "type": "string" }, - "pieceSize": { - "description": "Size of the piece (required for external import, optional if piece exists in DB)", + "bucketAcl": { + "description": "Access Control List for new buckets.", + "type": "string", + "example": "authenticatedRead" + }, + "bucketPolicyOnly": { + "description": "Access checks should use bucket-level IAM policies.", + "type": "boolean", + "default": false + }, + "clientCredentials": { + "description": "Use client credentials OAuth flow.", + "type": "boolean", + "default": false + }, + "clientId": { + "description": "OAuth Client Id.", "type": "string" }, - "rootCid": { - "description": "Root CID of the CAR file, used to populate the label field of storage deal", + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" + }, + "decompress": { + "description": "If set this will decompress gzip encoded objects.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,CrLf,InvalidUtf8,Dot" + }, + "endpoint": { + "description": "Custom endpoint for the storage API. 
Leave blank to use the provider default.", + "type": "string", + "example": "storage.example.org" + }, + "envAuth": { + "description": "Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false + }, + "location": { + "description": "Location for the newly created buckets.", + "type": "string", + "example": "" + }, + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false + }, + "objectAcl": { + "description": "Access Control List for new objects.", + "type": "string", + "example": "authenticatedRead" + }, + "projectNumber": { + "description": "Project number.", + "type": "string" + }, + "serviceAccountCredentials": { + "description": "Service Account Credentials JSON blob.", + "type": "string" + }, + "serviceAccountFile": { + "description": "Service Account Credentials JSON file path.", + "type": "string" + }, + "storageClass": { + "description": "The storage class to use when storing objects in Google Cloud Storage.", + "type": "string", + "example": "" + }, + "token": { + "description": "OAuth Access Token as a JSON blob.", + "type": "string" + }, + "tokenUrl": { + "description": "Token server url.", + "type": "string" + }, + "userProject": { + "description": "User project.", "type": "string" } } }, - "dataprep.CreateRequest": { + "storage.gphotosConfig": { "type": "object", - "required": [ - "name" - ], "properties": { - "deleteAfterExport": { - "description": "Whether to delete the source files after export", + "authUrl": { + "description": "Auth server URL.", + "type": "string" + }, + "batchCommitTimeout": { + "description": "Max time to wait for a batch to finish committing. 
(no longer used)", + "type": "string", + "default": "10m0s" + }, + "batchMode": { + "description": "Upload file batching sync|async|off.", + "type": "string", + "default": "sync" + }, + "batchSize": { + "description": "Max number of files in upload batch.", + "type": "integer", + "default": 0 + }, + "batchTimeout": { + "description": "Max time to allow an idle upload batch before uploading.", + "type": "string", + "default": "0s" + }, + "clientCredentials": { + "description": "Use client credentials OAuth flow.", "type": "boolean", "default": false }, - "maxSize": { - "description": "Maximum size of the CAR files to be created", - "type": "string", - "default": "31.5GiB" + "clientId": { + "description": "OAuth Client Id.", + "type": "string" }, - "minPieceSize": { - "description": "Minimum piece size for the preparation, applies only to DAG and remainer pieces", + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", "type": "string", - "default": "1MiB" + "default": "Slash,CrLf,InvalidUtf8,Dot" }, - "name": { - "description": "Name of the preparation", + "includeArchived": { + "description": "Also view and download archived media.", + "type": "boolean", + "default": false + }, + "proxy": { + "description": "Use the gphotosdl proxy for downloading the full resolution images", "type": "string" }, - "noDag": { - "description": "Whether to disable maintaining folder dag structure for the sources. If disabled, DagGen will not be possible and folders will not have an associated CID.", + "readOnly": { + "description": "Set to make the Google Photos backend read only.", "type": "boolean", "default": false }, - "noInline": { - "description": "Whether to disable inline storage for the preparation. 
Can save database space but requires at least one output storage.", + "readSize": { + "description": "Set to read the size of media items.", "type": "boolean", "default": false }, - "outputStorages": { - "description": "Name of Output storage systems to be used for the output", - "type": "array", - "items": { - "type": "string" - } + "startYear": { + "description": "Year limits the photos to be downloaded to those which are uploaded after the given year.", + "type": "integer", + "default": 2000 }, - "pieceSize": { - "description": "Target piece size of the CAR files used for piece commitment calculation", + "token": { + "description": "OAuth Access Token as a JSON blob.", "type": "string" }, - "sourceStorages": { - "description": "Name of Source storage systems to be used for the source", - "type": "array", - "items": { - "type": "string" - } + "tokenUrl": { + "description": "Token server url.", + "type": "string" } } }, - "dataprep.DeletePieceRequest": { + "storage.hdfsConfig": { "type": "object", "properties": { - "deleteCar": { - "description": "Delete the physical CAR file from storage (default: true)", - "type": "boolean" + "dataTransferProtection": { + "description": "Kerberos data transfer protection: authentication|integrity|privacy.", + "type": "string", + "example": "privacy" }, - "force": { - "description": "Delete even if deals reference this piece", - "type": "boolean" + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,Colon,Del,Ctl,InvalidUtf8,Dot" + }, + "namenode": { + "description": "Hadoop name nodes and ports.", + "type": "string" + }, + "servicePrincipalName": { + "description": "Kerberos service principal name for the namenode.", + "type": "string" + }, + "username": { + "description": "Hadoop user name.", + "type": "string", + "example": "root" } } }, - "dataprep.DirEntry": { + 
"storage.hidriveConfig": { "type": "object", "properties": { - "cid": { + "authUrl": { + "description": "Auth server URL.", "type": "string" }, - "fileVersions": { - "type": "array", - "items": { - "$ref": "#/definitions/dataprep.Version" - } + "chunkSize": { + "description": "Chunksize for chunked uploads.", + "type": "string", + "default": "48Mi" }, - "isDir": { - "type": "boolean" + "clientCredentials": { + "description": "Use client credentials OAuth flow.", + "type": "boolean", + "default": false }, - "path": { + "clientId": { + "description": "OAuth Client Id.", "type": "string" - } - } - }, - "dataprep.ExploreResult": { - "type": "object", - "properties": { - "cid": { + }, + "clientSecret": { + "description": "OAuth Client Secret.", "type": "string" }, - "path": { + "description": { + "description": "Description of the remote.", "type": "string" }, - "subEntries": { - "type": "array", - "items": { - "$ref": "#/definitions/dataprep.DirEntry" - } - } - } - }, - "dataprep.PieceList": { - "type": "object", - "properties": { - "attachmentId": { - "type": "integer" + "disableFetchingMemberCount": { + "description": "Do not fetch number of objects in directories unless it is absolutely necessary.", + "type": "boolean", + "default": false }, - "pieces": { - "type": "array", - "items": { - "$ref": "#/definitions/model.Car" - } + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,Dot" }, - "source": { - "$ref": "#/definitions/model.Storage" + "endpoint": { + "description": "Endpoint for the service.", + "type": "string", + "default": "https://api.hidrive.strato.com/2.1" }, - "storageId": { - "type": "integer" - } - } - }, - "dataprep.RemoveRequest": { - "type": "object", - "properties": { - "removeCars": { - "type": "boolean" - } - } - }, - "dataprep.RenameRequest": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string" - } - } - }, - "dataprep.Version": { - "type": 
"object", - "properties": { - "cid": { - "type": "string" + "rootPrefix": { + "description": "The root/parent folder for all paths.", + "type": "string", + "default": "/", + "example": "/" }, - "hash": { - "type": "string" + "scopeAccess": { + "description": "Access permissions that rclone should use when requesting access from HiDrive.", + "type": "string", + "default": "rw", + "example": "rw" }, - "id": { - "type": "integer" + "scopeRole": { + "description": "User-level that rclone should use when requesting access from HiDrive.", + "type": "string", + "default": "user", + "example": "user" }, - "lastModified": { + "token": { + "description": "OAuth Access Token as a JSON blob.", "type": "string" }, - "size": { - "type": "integer" + "tokenUrl": { + "description": "Token server url.", + "type": "string" + }, + "uploadConcurrency": { + "description": "Concurrency for chunked uploads.", + "type": "integer", + "default": 4 + }, + "uploadCutoff": { + "description": "Cutoff/Threshold for chunked uploads.", + "type": "string", + "default": "96Mi" } } }, - "deal.ListDealRequest": { + "storage.httpConfig": { "type": "object", "properties": { - "dealTypes": { - "description": "deal type filter (market for f05, pdp for f41)", - "type": "array", - "items": { - "$ref": "#/definitions/model.DealType" - } + "description": { + "description": "Description of the remote.", + "type": "string" }, - "preparations": { - "description": "preparation ID or name filter", - "type": "array", - "items": { - "type": "string" - } + "headers": { + "description": "Set HTTP headers for all transactions.", + "type": "string" }, - "providers": { - "description": "provider filter", - "type": "array", - "items": { - "type": "string" - } + "noEscape": { + "description": "Do not escape URL metacharacters in path names.", + "type": "boolean", + "default": false }, - "schedules": { - "description": "schedule id filter", - "type": "array", - "items": { - "type": "integer" - } + "noHead": { + 
"description": "Don't use HEAD requests.", + "type": "boolean", + "default": false }, - "sources": { - "description": "source ID or name filter", - "type": "array", - "items": { - "type": "string" - } + "noSlash": { + "description": "Set this if the site doesn't end directories with /.", + "type": "boolean", + "default": false }, - "states": { - "description": "state filter", - "type": "array", - "items": { - "$ref": "#/definitions/model.DealState" - } + "url": { + "description": "URL of HTTP host to connect to.", + "type": "string" } } }, - "deal.Proposal": { + "storage.jottacloudConfig": { "type": "object", "properties": { - "clientAddress": { - "description": "Client address", + "authUrl": { + "description": "Auth server URL.", "type": "string" }, - "duration": { - "description": "Duration in epoch or in duration format, i.e. 1500000, 2400h", - "type": "string", - "default": "12740h" - }, - "fileSize": { - "description": "File size in bytes for boost to fetch the CAR file", - "type": "integer" - }, - "httpHeaders": { - "description": "http headers to be passed with the request (i.e. 
key=value)", - "type": "array", - "items": { - "type": "string" - } - }, - "ipni": { - "description": "Whether the deal should be IPNI", - "type": "boolean", - "default": true - }, - "keepUnsealed": { - "description": "Whether the deal should be kept unsealed", + "clientCredentials": { + "description": "Use client credentials OAuth flow.", "type": "boolean", - "default": true + "default": false }, - "pieceCid": { - "description": "Piece CID", + "clientId": { + "description": "OAuth Client Id.", "type": "string" }, - "pieceSize": { - "description": "Piece size", + "clientSecret": { + "description": "OAuth Client Secret.", "type": "string" }, - "pricePerDeal": { - "description": "Price in FIL per deal", - "type": "number", - "default": 0 - }, - "pricePerGb": { - "description": "Price in FIL per GiB", - "type": "number", - "default": 0 + "description": { + "description": "Description of the remote.", + "type": "string" }, - "pricePerGbEpoch": { - "description": "Price in FIL per GiB per epoch", - "type": "number", - "default": 0 + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot" }, - "providerId": { - "description": "Provider ID", - "type": "string" + "hardDelete": { + "description": "Delete files permanently rather than putting them into the trash.", + "type": "boolean", + "default": false }, - "rootCid": { - "description": "Root CID that is required as part of the deal proposal, if empty, will be set to empty CID", + "md5MemoryLimit": { + "description": "Files bigger than this will be cached on disk to calculate the MD5 if required.", "type": "string", - "default": "bafkqaaa" + "default": "10Mi" }, - "startDelay": { - "description": "Deal start delay in epoch or in duration format, i.e. 
1000, 72h", - "type": "string", - "default": "72h" + "noVersions": { + "description": "Avoid server side versioning by deleting files and recreating files instead of overwriting them.", + "type": "boolean", + "default": false }, - "urlTemplate": { - "description": "URL template with PIECE_CID placeholder for boost to fetch the CAR file, i.e. http://127.0.0.1/piece/{PIECE_CID}.car", + "token": { + "description": "OAuth Access Token as a JSON blob.", "type": "string" }, - "verified": { - "description": "Whether the deal should be verified", + "tokenUrl": { + "description": "Token server url.", + "type": "string" + }, + "trashedOnly": { + "description": "Only show files that are in the trash.", "type": "boolean", - "default": true - } - } - }, - "file.DealsForFileRange": { - "type": "object", - "properties": { - "deals": { - "type": "array", - "items": { - "$ref": "#/definitions/model.Deal" - } + "default": false }, - "fileRange": { - "$ref": "#/definitions/model.FileRange" + "uploadResumeLimit": { + "description": "Files bigger than this can be resumed if the upload fail's.", + "type": "string", + "default": "10Mi" } } }, - "file.Info": { + "storage.koofrDigistorageConfig": { "type": "object", "properties": { - "path": { - "description": "Path to the new file, relative to the source", + "description": { + "description": "Description of the remote.", "type": "string" - } - } - }, - "job.SourceStatus": { - "type": "object", - "properties": { - "attachmentId": { - "type": "integer" }, - "jobs": { - "type": "array", - "items": { - "$ref": "#/definitions/model.Job" - } + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" }, - "output": { - "type": "array", - "items": { - "$ref": "#/definitions/model.Storage" - } + "mountid": { + "description": "Mount ID of the mount to use.", + "type": "string" }, - "source": { - "$ref": "#/definitions/model.Storage" + "password": { + "description": 
"Your password for rclone generate one at https://storage.rcs-rds.ro/app/admin/preferences/password.", + "type": "string" }, - "storageId": { - "type": "integer" + "setmtime": { + "description": "Does the backend support setting modification time.", + "type": "boolean", + "default": true + }, + "user": { + "description": "Your user name.", + "type": "string" } } }, - "model.Car": { + "storage.koofrKoofrConfig": { "type": "object", "properties": { - "attachmentId": { - "type": "integer" - }, - "createdAt": { + "description": { + "description": "Description of the remote.", "type": "string" }, - "fileSize": { - "type": "integer" - }, - "id": { - "type": "integer" + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" }, - "jobId": { - "type": "integer" + "mountid": { + "description": "Mount ID of the mount to use.", + "type": "string" }, - "minPieceSizePadding": { - "description": "MinPieceSizePadding tracks virtual padding for inline mode only. Inline: stores padding amount, PieceReader serves zeros virtually. 
Non-inline: always 0, literal zeros are written to CAR file for Curio TreeD compatibility.", - "type": "integer" + "password": { + "description": "Your password for rclone generate one at https://app.koofr.net/app/admin/preferences/password.", + "type": "string" }, - "numOfFiles": { - "type": "integer" + "setmtime": { + "description": "Does the backend support setting modification time.", + "type": "boolean", + "default": true }, - "pieceCid": { + "user": { + "description": "Your user name.", + "type": "string" + } + } + }, + "storage.koofrOtherConfig": { + "type": "object", + "properties": { + "description": { + "description": "Description of the remote.", "type": "string" }, - "pieceSize": { - "type": "integer" + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" }, - "pieceType": { - "description": "PieceType indicates whether this is a data piece or DAG piece", + "endpoint": { + "description": "The Koofr API endpoint to use.", "type": "string" }, - "preparationId": { - "description": "Association - SET NULL for fast prep deletion, async cleanup", - "type": "integer" + "mountid": { + "description": "Mount ID of the mount to use.", + "type": "string" }, - "rootCid": { + "password": { + "description": "Your password for rclone (generate one at your service's settings page).", "type": "string" }, - "storageId": { - "type": "integer" + "setmtime": { + "description": "Does the backend support setting modification time.", + "type": "boolean", + "default": true }, - "storagePath": { - "description": "StoragePath is the path to the CAR file inside the storage. 
If the StorageID is nil but StoragePath is not empty, it means the CAR file is stored at the local absolute path.", + "user": { + "description": "Your user name.", "type": "string" } } }, - "model.ClientConfig": { + "storage.localConfig": { "type": "object", "properties": { - "caCert": { - "description": "Paths to CA certificate used to verify servers", - "type": "array", - "items": { - "type": "string" - } - }, - "clientCert": { - "description": "Path to Client SSL certificate (PEM) for mutual TLS auth", - "type": "string" + "caseInsensitive": { + "description": "Force the filesystem to report itself as case insensitive.", + "type": "boolean", + "default": false }, - "clientKey": { - "description": "Path to Client SSL private key (PEM) for mutual TLS auth", - "type": "string" + "caseSensitive": { + "description": "Force the filesystem to report itself as case sensitive.", + "type": "boolean", + "default": false }, - "connectTimeout": { - "description": "HTTP Client Connect timeout", - "type": "integer" + "copyLinks": { + "description": "Follow symlinks and copy the pointed to item.", + "type": "boolean", + "default": false }, - "disableHttp2": { - "description": "Disable HTTP/2 in the transport", - "type": "boolean" + "description": { + "description": "Description of the remote.", + "type": "string" }, - "disableHttpKeepAlives": { - "description": "Disable HTTP keep-alives and use each connection once.", - "type": "boolean" + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,Dot" }, - "expectContinueTimeout": { - "description": "Timeout when using expect / 100-continue in HTTP", - "type": "integer" + "hashes": { + "description": "Comma separated list of supported checksum types.", + "type": "string" }, - "headers": { - "description": "Set HTTP header for all transactions", - "type": "object", - "additionalProperties": { - "type": "string" - } + "links": { + "description": "Translate symlinks to/from regular 
files with a '.rclonelink' extension for the local backend.", + "type": "boolean", + "default": false }, - "insecureSkipVerify": { - "description": "Do not verify the server SSL certificate (insecure)", - "type": "boolean" + "noCheckUpdated": { + "description": "Don't check to see if the files change during upload.", + "type": "boolean", + "default": false }, - "lowlevelRetries": { - "description": "Maximum number of retries for low-level client errors. Default is 10 retries.", - "type": "integer" + "noClone": { + "description": "Disable reflink cloning for server-side copies.", + "type": "boolean", + "default": false }, - "noGzip": { - "description": "Don't set Accept-Encoding: gzip", - "type": "boolean" + "noPreallocate": { + "description": "Disable preallocation of disk space for transferred files.", + "type": "boolean", + "default": false }, - "retryBackoff": { - "description": "Constant backoff between retries. Default is 1s.", - "type": "integer" + "noSetModtime": { + "description": "Disable setting modtime.", + "type": "boolean", + "default": false }, - "retryBackoffExponential": { - "description": "Exponential backoff between retries. Default is 1.0.", - "type": "number" + "noSparse": { + "description": "Disable sparse files for multi-thread downloads.", + "type": "boolean", + "default": false }, - "retryDelay": { - "description": "Delay between retries. Default is 1s.", - "type": "integer" + "nounc": { + "description": "Disable UNC (long path names) conversion on Windows.", + "type": "boolean", + "default": false, + "example": true }, - "retryMaxCount": { - "description": "Maximum number of retries. Default is 10 retries.", - "type": "integer" + "oneFileSystem": { + "description": "Don't cross filesystem boundaries (unix/macOS only).", + "type": "boolean", + "default": false }, - "scanConcurrency": { - "description": "Maximum number of concurrent scan requests. 
Default is 1.", - "type": "integer" + "skipLinks": { + "description": "Don't warn about skipped symlinks.", + "type": "boolean", + "default": false }, - "skipInaccessibleFile": { - "description": "Skip inaccessible files. Default is false.", - "type": "boolean" + "skipSpecials": { + "description": "Don't warn about skipped pipes, sockets and device objects.", + "type": "boolean", + "default": false }, - "timeout": { - "description": "IO idle timeout", - "type": "integer" + "timeType": { + "description": "Set what kind of time is returned.", + "type": "string", + "default": "mtime", + "example": "mtime" }, - "useServerModTime": { - "description": "Use server modified time instead of object metadata", - "type": "boolean" + "unicodeNormalization": { + "description": "Apply unicode NFC normalization to paths and filenames.", + "type": "boolean", + "default": false }, - "userAgent": { - "description": "Set the user-agent to a specified string", - "type": "string" + "zeroSizeLinks": { + "description": "Assume the Stat size of links is zero (and read them instead) (deprecated).", + "type": "boolean", + "default": false } } }, - "model.ConfigMap": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "model.Deal": { + "storage.mailruConfig": { "type": "object", "properties": { + "authUrl": { + "description": "Auth server URL.", + "type": "string" + }, + "checkHash": { + "description": "What should copy do if file checksum is mismatched or invalid.", + "type": "boolean", + "default": true, + "example": true + }, + "clientCredentials": { + "description": "Use client credentials OAuth flow.", + "type": "boolean", + "default": false + }, "clientId": { + "description": "OAuth Client Id.", "type": "string" }, - "createdAt": { + "clientSecret": { + "description": "OAuth Client Secret.", "type": "string" }, - "dealId": { - "type": "integer" + "description": { + "description": "Description of the remote.", + "type": "string" }, - "dealType": { - "$ref": 
"#/definitions/model.DealType" + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot" }, - "endEpoch": { - "type": "integer" + "pass": { + "description": "Password.", + "type": "string" }, - "errorMessage": { + "quirks": { + "description": "Comma separated list of internal maintenance flags.", "type": "string" }, - "id": { - "type": "integer" + "speedupEnable": { + "description": "Skip full upload if there is another file with same data hash.", + "type": "boolean", + "default": true, + "example": true }, - "label": { + "speedupFilePatterns": { + "description": "Comma separated list of file name patterns eligible for speedup (put by hash).", + "type": "string", + "default": "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf", + "example": "" + }, + "speedupMaxDisk": { + "description": "This option allows you to disable speedup (put by hash) for large files.", + "type": "string", + "default": "3Gi", + "example": "0" + }, + "speedupMaxMemory": { + "description": "Files larger than the size given below will always be hashed on disk.", + "type": "string", + "default": "32Mi", + "example": "0" + }, + "token": { + "description": "OAuth Access Token as a JSON blob.", "type": "string" }, - "lastVerifiedAt": { - "description": "LastVerifiedAt is the last time the deal was verified as active by the tracker", + "tokenUrl": { + "description": "Token server url.", "type": "string" }, - "nextChallengeEpoch": { - "description": "NextChallengeEpoch is the next epoch when a challenge proof is due", - "type": "integer" + "user": { + "description": "User name (usually email).", + "type": "string" }, - "pieceCid": { + "userAgent": { + "description": "HTTP user agent used internally by client.", + "type": "string" + } + } + }, + "storage.megaConfig": { + "type": "object", + "properties": { + "2fa": { + "description": "The 2FA code of your MEGA account if the 
account is set up with one", "type": "string" }, - "pieceSize": { - "type": "integer" + "debug": { + "description": "Output more debug from Mega.", + "type": "boolean", + "default": false }, - "price": { + "description": { + "description": "Description of the remote.", "type": "string" }, - "proofSetId": { - "description": "PDP-specific fields (only populated for DealTypePDP)", - "type": "integer" + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" }, - "proofSetLive": { - "description": "ProofSetLive indicates if the proof set is live (actively being challenged)", - "type": "boolean" + "hardDelete": { + "description": "Delete files permanently rather than putting them into the trash.", + "type": "boolean", + "default": false }, - "proposalId": { + "masterKey": { + "description": "Master key (internal use only)", "type": "string" }, - "provider": { + "pass": { + "description": "Password.", "type": "string" }, - "scheduleId": { - "description": "Associations", - "type": "integer" + "sessionId": { + "description": "Session (internal use only)", + "type": "string" }, - "sectorStartEpoch": { - "type": "integer" + "useHttps": { + "description": "Use HTTPS for transfers.", + "type": "boolean", + "default": false }, - "startEpoch": { - "type": "integer" + "user": { + "description": "User name.", + "type": "string" + } + } + }, + "storage.netstorageConfig": { + "type": "object", + "properties": { + "account": { + "description": "Set the NetStorage account name", + "type": "string" }, - "state": { - "$ref": "#/definitions/model.DealState" + "description": { + "description": "Description of the remote.", + "type": "string" }, - "updatedAt": { + "host": { + "description": "Domain+path of NetStorage host to connect to.", "type": "string" }, - "verified": { - "type": "boolean" + "protocol": { + "description": "Select between HTTP or HTTPS protocol.", + "type": "string", + "default": "https", + "example": 
"http" }, - "walletId": { - "type": "integer" + "secret": { + "description": "Set the NetStorage account secret/G2O key for authentication.", + "type": "string" } } }, - "model.DealState": { - "type": "string", - "enum": [ - "proposed", - "published", - "active", - "expired", - "proposal_expired", - "rejected", - "slashed", - "error" - ], - "x-enum-varnames": [ - "DealProposed", - "DealPublished", - "DealActive", - "DealExpired", - "DealProposalExpired", - "DealRejected", - "DealSlashed", - "DealErrored" - ] - }, - "model.DealType": { - "type": "string", - "enum": [ - "market", - "pdp" - ], - "x-enum-varnames": [ - "DealTypeMarket", - "DealTypePDP" - ] - }, - "model.File": { + "storage.onedriveConfig": { "type": "object", - "properties": { - "attachmentId": { - "description": "Associations - AttachmentID SET NULL for fast prep deletion, async cleanup", - "type": "integer" + "properties": { + "accessScopes": { + "description": "Set scopes to be requested by rclone.", + "type": "string", + "default": "Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access", + "example": "Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access" }, - "cid": { - "description": "CID is the CID of the file.", + "authUrl": { + "description": "Auth server URL.", "type": "string" }, - "directoryId": { - "type": "integer" + "avOverride": { + "description": "Allows download of files the server thinks has a virus.", + "type": "boolean", + "default": false }, - "fileRanges": { - "type": "array", - "items": { - "$ref": "#/definitions/model.FileRange" - } + "chunkSize": { + "description": "Chunk size to upload files with - must be multiple of 320k (327,680 bytes).", + "type": "string", + "default": "10Mi" }, - "hash": { - "description": "Hash is the hash of the file.", + "clientCredentials": { + "description": "Use client credentials OAuth flow.", + "type": "boolean", + "default": false + }, + "clientId": { + "description": 
"OAuth Client Id.", "type": "string" }, - "id": { - "type": "integer" + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" }, - "lastModifiedNano": { - "type": "integer" + "delta": { + "description": "If set rclone will use delta listing to implement recursive listings.", + "type": "boolean", + "default": false }, - "path": { - "description": "Path is the relative path to the file inside the storage.", + "description": { + "description": "Description of the remote.", "type": "string" }, - "size": { - "description": "Size is the size of the file in bytes.", - "type": "integer" - } - } - }, - "model.FileRange": { - "type": "object", - "properties": { - "cid": { - "description": "CID is the CID of the range.", + "disableSitePermission": { + "description": "Disable the request for Sites.Read.All permission.", + "type": "boolean", + "default": false + }, + "driveId": { + "description": "The ID of the drive to use.", "type": "string" }, - "fileId": { - "type": "integer" + "driveType": { + "description": "The type of the drive (personal | business | documentLibrary).", + "type": "string" }, - "id": { - "type": "integer" + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot" }, - "jobId": { - "description": "Associations", - "type": "integer" + "exposeOnenoteFiles": { + "description": "Set to make OneNote files show up in directory listings.", + "type": "boolean", + "default": false }, - "length": { - "description": "Length is the length of the range in bytes.", - "type": "integer" + "hardDelete": { + "description": "Permanently delete files on removal.", + "type": "boolean", + "default": false }, - "offset": { - "description": "Offset is the offset of the range inside the file.", - "type": "integer" - } - } - }, - "model.Job": { - "type": "object", - "properties": { - 
"attachmentId": { - "type": "integer" + "hashType": { + "description": "Specify the hash in use for the backend.", + "type": "string", + "default": "auto", + "example": "auto" }, - "errorMessage": { + "linkPassword": { + "description": "Set the password for links created by the link command.", "type": "string" }, - "errorStackTrace": { + "linkScope": { + "description": "Set the scope of the links created by the link command.", + "type": "string", + "default": "anonymous", + "example": "anonymous" + }, + "linkType": { + "description": "Set the type of the links created by the link command.", + "type": "string", + "default": "view", + "example": "view" + }, + "listChunk": { + "description": "Size of listing chunk.", + "type": "integer", + "default": 1000 + }, + "metadataPermissions": { + "description": "Control whether permissions should be read or written in metadata.", + "type": "string", + "default": "off", + "example": "off" + }, + "noVersions": { + "description": "Remove all versions on modifying operations.", + "type": "boolean", + "default": false + }, + "region": { + "description": "Choose national cloud region for OneDrive.", + "type": "string", + "default": "global", + "example": "global" + }, + "rootFolderId": { + "description": "ID of the root folder.", "type": "string" }, - "id": { - "type": "integer" + "serverSideAcrossConfigs": { + "description": "Deprecated: use --server-side-across-configs instead.", + "type": "boolean", + "default": false }, - "state": { - "$ref": "#/definitions/model.JobState" + "tenant": { + "description": "ID of the service principal's tenant. 
Also called its directory ID.", + "type": "string" }, - "type": { - "$ref": "#/definitions/model.JobType" + "token": { + "description": "OAuth Access Token as a JSON blob.", + "type": "string" }, - "workerId": { - "description": "Associations - AttachmentID SET NULL for fast prep deletion, async cleanup", + "tokenUrl": { + "description": "Token server url.", "type": "string" + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "off" } } }, - "model.JobState": { - "type": "string", - "enum": [ - "created", - "ready", - "paused", - "processing", - "complete", - "error" - ], - "x-enum-varnames": [ - "Created", - "Ready", - "Paused", - "Processing", - "Complete", - "Error" - ] - }, - "model.JobType": { - "type": "string", - "enum": [ - "scan", - "pack", - "daggen" - ], - "x-enum-varnames": [ - "Scan", - "Pack", - "DagGen" - ] - }, - "model.Preparation": { + "storage.oosEnv_authConfig": { "type": "object", "properties": { - "createdAt": { + "attemptResumeUpload": { + "description": "If true attempt to resume previously started multipart upload for the object.", + "type": "boolean", + "default": false + }, + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" + }, + "compartment": { + "description": "Specify compartment OCID, if you need to list buckets.", "type": "string" }, - "deleteAfterExport": { - "description": "DeleteAfterExport is a flag that indicates whether the source files should be deleted after export.", - "type": "boolean" + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" }, - "id": { - "type": "integer" + "copyTimeout": { + "description": "Timeout for copy.", + "type": "string", + "default": "1m0s" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "disableChecksum": { + "description": "Don't store MD5 checksum with object 
metadata.", + "type": "boolean", + "default": false + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" + }, + "endpoint": { + "description": "Endpoint for Object storage API.", + "type": "string" + }, + "leavePartsOnError": { + "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", + "type": "boolean", + "default": false + }, + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "maxSize": { - "type": "integer" + "namespace": { + "description": "Object storage namespace", + "type": "string" }, - "minPieceSize": { - "description": "Minimum piece size for the preparation, applies only to DAG and remainder pieces", - "type": "integer" + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false }, - "name": { + "region": { + "description": "Object storage Region", "type": "string" }, - "noDag": { - "type": "boolean" + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "type": "string", + "example": "" }, - "noInline": { - "type": "boolean" + "sseCustomerKey": { + "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "type": "string", + "example": "" }, - "outputStorages": { - "type": "array", - "items": { - "$ref": "#/definitions/model.Storage" - } + "sseCustomerKeyFile": { + "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "type": "string", + "example": "" }, - "pieceSize": { - "type": "integer" + "sseCustomerKeySha256": { + "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the 
encryption", + "type": "string", + "example": "" }, - "sourceStorages": { - "type": "array", - "items": { - "$ref": "#/definitions/model.Storage" - } + "sseKmsKeyId": { + "description": "if using your own master key in vault, this header specifies the", + "type": "string", + "example": "" }, - "updatedAt": { - "type": "string" + "storageTier": { + "description": "The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "type": "string", + "default": "Standard", + "example": "Standard" }, - "walletId": { - "description": "Associations", - "type": "integer" + "uploadConcurrency": { + "description": "Concurrency for multipart uploads.", + "type": "integer", + "default": 10 + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" } } }, - "model.Schedule": { + "storage.oosInstance_principal_authConfig": { "type": "object", "properties": { - "allowedPieceCids": { - "type": "array", - "items": { - "type": "string" - } + "attemptResumeUpload": { + "description": "If true attempt to resume previously started multipart upload for the object.", + "type": "boolean", + "default": false }, - "announceToIpni": { - "type": "boolean" + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" }, - "createdAt": { + "compartment": { + "description": "Specify compartment OCID, if you need to list buckets.", "type": "string" }, - "dealType": { - "$ref": "#/definitions/model.DealType" + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" }, - "duration": { - "type": "integer" + "copyTimeout": { + "description": "Timeout for copy.", + "type": "string", + "default": "1m0s" }, - "errorMessage": { + "description": { + "description": "Description of the remote.", "type": "string" }, - "force": { - "type": 
"boolean" + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false }, - "httpHeaders": { - "$ref": "#/definitions/model.ConfigMap" + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" }, - "id": { - "type": "integer" + "endpoint": { + "description": "Endpoint for Object storage API.", + "type": "string" }, - "keepUnsealed": { - "type": "boolean" + "leavePartsOnError": { + "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", + "type": "boolean", + "default": false }, - "maxPendingDealNumber": { - "type": "integer" + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "maxPendingDealSize": { - "type": "integer" + "namespace": { + "description": "Object storage namespace", + "type": "string" }, - "notes": { + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false + }, + "region": { + "description": "Object storage Region", "type": "string" }, - "preparationId": { - "description": "Associations", - "type": "integer" + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "type": "string", + "example": "" }, - "pricePerDeal": { - "type": "number" + "sseCustomerKey": { + "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "type": "string", + "example": "" }, - "pricePerGb": { - "type": "number" + "sseCustomerKeyFile": { + "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "type": "string", + "example": "" }, - "pricePerGbEpoch": { - "type": "number" + "sseCustomerKeySha256": 
{ + "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", + "type": "string", + "example": "" }, - "provider": { + "sseKmsKeyId": { + "description": "if using your own master key in vault, this header specifies the", + "type": "string", + "example": "" + }, + "storageTier": { + "description": "The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "type": "string", + "default": "Standard", + "example": "Standard" + }, + "uploadConcurrency": { + "description": "Concurrency for multipart uploads.", + "type": "integer", + "default": 10 + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" + } + } + }, + "storage.oosNo_authConfig": { + "type": "object", + "properties": { + "attemptResumeUpload": { + "description": "If true attempt to resume previously started multipart upload for the object.", + "type": "boolean", + "default": false + }, + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" + }, + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" + }, + "copyTimeout": { + "description": "Timeout for copy.", + "type": "string", + "default": "1m0s" + }, + "description": { + "description": "Description of the remote.", "type": "string" }, - "scheduleCron": { + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" + }, + "endpoint": { + "description": "Endpoint for Object storage API.", "type": "string" }, - "scheduleCronPerpetual": { - "type": "boolean" + "leavePartsOnError": { + "description": "If true avoid calling 
abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", + "type": "boolean", + "default": false }, - "scheduleDealNumber": { - "type": "integer" + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 + }, + "namespace": { + "description": "Object storage namespace", + "type": "string" + }, + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false }, - "scheduleDealSize": { - "type": "integer" + "region": { + "description": "Object storage Region", + "type": "string" }, - "startDelay": { - "type": "integer" + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "type": "string", + "example": "" }, - "state": { - "$ref": "#/definitions/model.ScheduleState" + "sseCustomerKey": { + "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "type": "string", + "example": "" }, - "totalDealNumber": { - "type": "integer" + "sseCustomerKeyFile": { + "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "type": "string", + "example": "" }, - "totalDealSize": { - "type": "integer" + "sseCustomerKeySha256": { + "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", + "type": "string", + "example": "" }, - "updatedAt": { - "type": "string" + "sseKmsKeyId": { + "description": "if using your own master key in vault, this header specifies the", + "type": "string", + "example": "" }, - "urlTemplate": { - "type": "string" + "storageTier": { + "description": "The storage class to use when storing new objects in storage. 
https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "type": "string", + "default": "Standard", + "example": "Standard" }, - "verified": { - "type": "boolean" + "uploadConcurrency": { + "description": "Concurrency for multipart uploads.", + "type": "integer", + "default": 10 + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" } } }, - "model.ScheduleState": { - "type": "string", - "enum": [ - "active", - "paused", - "error", - "completed" - ], - "x-enum-varnames": [ - "ScheduleActive", - "SchedulePaused", - "ScheduleError", - "ScheduleCompleted" - ] - }, - "model.Storage": { + "storage.oosResource_principal_authConfig": { "type": "object", "properties": { - "clientConfig": { - "description": "ClientConfig is the HTTP configuration for the storage, if applicable.", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "attemptResumeUpload": { + "description": "If true attempt to resume previously started multipart upload for the object.", + "type": "boolean", + "default": false }, - "config": { - "description": "Config is a map of key-value pairs that can be used to store RClone options.", - "allOf": [ - { - "$ref": "#/definitions/model.ConfigMap" - } - ] + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" }, - "createdAt": { + "compartment": { + "description": "Specify compartment OCID, if you need to list buckets.", "type": "string" }, - "id": { - "type": "integer" + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" }, - "name": { - "type": "string" + "copyTimeout": { + "description": "Timeout for copy.", + "type": "string", + "default": "1m0s" }, - "path": { - "description": "Path is the path to the storage root.", + "description": { + "description": "Description of the remote.", "type": "string" }, - 
"preparationsAsOutput": { - "type": "array", - "items": { - "$ref": "#/definitions/model.Preparation" - } + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false }, - "preparationsAsSource": { - "description": "Associations", - "type": "array", - "items": { - "$ref": "#/definitions/model.Preparation" - } + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" }, - "type": { + "endpoint": { + "description": "Endpoint for Object storage API.", "type": "string" }, - "updatedAt": { - "type": "string" - } - } - }, - "model.Wallet": { - "type": "object", - "properties": { - "actorId": { - "description": "nullable, links to on-chain actor f0...", - "type": "string" + "leavePartsOnError": { + "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", + "type": "boolean", + "default": false }, - "address": { - "description": "filecoin address (f1.../f3...)", + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 + }, + "namespace": { + "description": "Object storage namespace", "type": "string" }, - "id": { - "type": "integer" + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false }, - "keyPath": { - "description": "absolute path to key file", + "region": { + "description": "Object storage Region", "type": "string" }, - "keyStore": { - "description": "local, yubikey, aws-kms, etc", - "type": "string" + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "type": "string", + "example": "" }, - "name": { - "description": "optional label", - "type": "string" + "sseCustomerKey": { + "description": "To use SSE-C, the optional 
header that specifies the base64-encoded 256-bit encryption key to use to", + "type": "string", + "example": "" + }, + "sseCustomerKeyFile": { + "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "type": "string", + "example": "" + }, + "sseCustomerKeySha256": { + "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", + "type": "string", + "example": "" + }, + "sseKmsKeyId": { + "description": "if using your own master key in vault, this header specifies the", + "type": "string", + "example": "" + }, + "storageTier": { + "description": "The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "type": "string", + "default": "Standard", + "example": "Standard" + }, + "uploadConcurrency": { + "description": "Concurrency for multipart uploads.", + "type": "integer", + "default": 10 + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" } } }, - "schedule.CreateRequest": { + "storage.oosUser_principal_authConfig": { "type": "object", "properties": { - "allowedPieceCids": { - "description": "Allowed piece CIDs in this schedule", - "type": "array", - "items": { - "type": "string" - } - }, - "dealType": { - "description": "Deal type: market (f05) or pdp (f41)", - "type": "string" + "attemptResumeUpload": { + "description": "If true attempt to resume previously started multipart upload for the object.", + "type": "boolean", + "default": false }, - "duration": { - "description": "Duration in epoch or in duration format, i.e. 
1500000, 2400h", + "chunkSize": { + "description": "Chunk size to use for uploading.", "type": "string", - "default": "12840h" + "default": "5Mi" }, - "force": { - "description": "Force to send out deals regardless of replication restriction", - "type": "boolean" + "compartment": { + "description": "Specify compartment OCID, if you need to list buckets.", + "type": "string" }, - "httpHeaders": { - "description": "http headers to be passed with the request (i.e. key=value)", - "type": "array", - "items": { - "type": "string" - } + "configFile": { + "description": "Path to OCI config file", + "type": "string", + "default": "~/.oci/config", + "example": "~/.oci/config" }, - "ipni": { - "description": "Whether the deal should be IPNI", - "type": "boolean", - "default": true + "configProfile": { + "description": "Profile name inside the oci config file", + "type": "string", + "default": "Default", + "example": "Default" }, - "keepUnsealed": { - "description": "Whether the deal should be kept unsealed", - "type": "boolean", - "default": true + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" }, - "maxPendingDealNumber": { - "description": "Max pending deal number", - "type": "integer" + "copyTimeout": { + "description": "Timeout for copy.", + "type": "string", + "default": "1m0s" }, - "maxPendingDealSize": { - "description": "Max pending deal size in human readable format, i.e. 
100 TiB", + "description": { + "description": "Description of the remote.", "type": "string" }, - "notes": { - "description": "Notes", - "type": "string" + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false }, - "preparation": { - "description": "Preparation ID or name", - "type": "string" + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" }, - "pricePerDeal": { - "description": "Price in FIL per deal", - "type": "number", - "default": 0 + "endpoint": { + "description": "Endpoint for Object storage API.", + "type": "string" }, - "pricePerGb": { - "description": "Price in FIL per GiB", - "type": "number", - "default": 0 + "leavePartsOnError": { + "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", + "type": "boolean", + "default": false }, - "pricePerGbEpoch": { - "description": "Price in FIL per GiB per epoch", - "type": "number", - "default": 0 + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "provider": { - "description": "Provider", + "namespace": { + "description": "Object storage namespace", "type": "string" }, - "scheduleCron": { - "description": "Schedule cron pattern", + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false + }, + "region": { + "description": "Object storage Region", "type": "string" }, - "scheduleCronPerpetual": { - "description": "Whether a cron schedule should run in definitely", - "type": "boolean" + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "type": "string", + "example": "" }, - "scheduleDealNumber": { - "description": "Number of deals per scheduled 
time", - "type": "integer" + "sseCustomerKey": { + "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "type": "string", + "example": "" }, - "scheduleDealSize": { - "description": "Size of deals per schedule trigger in human readable format, i.e. 100 TiB", - "type": "string" + "sseCustomerKeyFile": { + "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "type": "string", + "example": "" }, - "startDelay": { - "description": "Deal start delay in epoch or in duration format, i.e. 1000, 72h", + "sseCustomerKeySha256": { + "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", "type": "string", - "default": "72h" + "example": "" }, - "totalDealNumber": { - "description": "Total number of deals", - "type": "integer" + "sseKmsKeyId": { + "description": "if using your own master key in vault, this header specifies the", + "type": "string", + "example": "" }, - "totalDealSize": { - "description": "Total size of deals in human readable format, i.e. 100 TiB", - "type": "string" + "storageTier": { + "description": "The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "type": "string", + "default": "Standard", + "example": "Standard" }, - "urlTemplate": { - "description": "URL template with PIECE_CID placeholder for boost to fetch the CAR file, i.e. 
http://127.0.0.1/piece/{PIECE_CID}.car", - "type": "string" + "uploadConcurrency": { + "description": "Concurrency for multipart uploads.", + "type": "integer", + "default": 10 }, - "verified": { - "description": "Whether the deal should be verified", - "type": "boolean", - "default": true + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" } } }, - "schedule.UpdateRequest": { + "storage.oosWorkload_identity_authConfig": { "type": "object", "properties": { - "allowedPieceCids": { - "description": "Allowed piece CIDs in this schedule", - "type": "array", - "items": { - "type": "string" - } + "attemptResumeUpload": { + "description": "If true attempt to resume previously started multipart upload for the object.", + "type": "boolean", + "default": false }, - "dealType": { - "description": "Deal type: market (f05) or pdp (f41)", + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" + }, + "compartment": { + "description": "Specify compartment OCID, if you need to list buckets.", "type": "string" }, - "duration": { - "description": "Duration in epoch or in duration format, i.e. 1500000, 2400h", + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", "type": "string", - "default": "12840h" + "default": "4.656Gi" }, - "force": { - "description": "Force to send out deals regardless of replication restriction", - "type": "boolean" + "copyTimeout": { + "description": "Timeout for copy.", + "type": "string", + "default": "1m0s" }, - "httpHeaders": { - "description": "http headers to be passed with the request (i.e. 
key=value)", - "type": "array", - "items": { - "type": "string" - } + "description": { + "description": "Description of the remote.", + "type": "string" }, - "ipni": { - "description": "Whether the deal should be IPNI", + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", - "default": true + "default": false }, - "keepUnsealed": { - "description": "Whether the deal should be kept unsealed", + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" + }, + "endpoint": { + "description": "Endpoint for Object storage API.", + "type": "string" + }, + "leavePartsOnError": { + "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", "type": "boolean", - "default": true + "default": false }, - "maxPendingDealNumber": { - "description": "Max pending deal number", - "type": "integer" + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "maxPendingDealSize": { - "description": "Max pending deal size in human readable format, i.e. 
100 TiB", + "namespace": { + "description": "Object storage namespace", "type": "string" }, - "notes": { - "description": "Notes", + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false + }, + "region": { + "description": "Object storage Region", "type": "string" }, - "pricePerDeal": { - "description": "Price in FIL per deal", - "type": "number", - "default": 0 + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "type": "string", + "example": "" }, - "pricePerGb": { - "description": "Price in FIL per GiB", - "type": "number", - "default": 0 + "sseCustomerKey": { + "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "type": "string", + "example": "" }, - "pricePerGbEpoch": { - "description": "Price in FIL per GiB per epoch", - "type": "number", - "default": 0 + "sseCustomerKeyFile": { + "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "type": "string", + "example": "" }, - "scheduleCron": { - "description": "Schedule cron pattern", - "type": "string" + "sseCustomerKeySha256": { + "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", + "type": "string", + "example": "" + }, + "sseKmsKeyId": { + "description": "if using your own master key in vault, this header specifies the", + "type": "string", + "example": "" + }, + "storageTier": { + "description": "The storage class to use when storing new objects in storage. 
https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "type": "string", + "default": "Standard", + "example": "Standard" + }, + "uploadConcurrency": { + "description": "Concurrency for multipart uploads.", + "type": "integer", + "default": 10 }, - "scheduleCronPerpetual": { - "description": "Whether a cron schedule should run in definitely", - "type": "boolean" + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" + } + } + }, + "storage.opendriveConfig": { + "type": "object", + "properties": { + "access": { + "description": "Files and folders will be uploaded with this access permission (default private)", + "type": "string", + "default": "private", + "example": "private" }, - "scheduleDealNumber": { - "description": "Number of deals per scheduled time", - "type": "integer" + "chunkSize": { + "description": "Files will be uploaded in chunks this size.", + "type": "string", + "default": "10Mi" }, - "scheduleDealSize": { - "description": "Size of deals per schedule trigger in human readable format, i.e. 100 TiB", + "description": { + "description": "Description of the remote.", "type": "string" }, - "startDelay": { - "description": "Deal start delay in epoch or in duration format, i.e. 1000, 72h", + "encoding": { + "description": "The encoding for the backend.", "type": "string", - "default": "72h" - }, - "totalDealNumber": { - "description": "Total number of deals", - "type": "integer" + "default": "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot" }, - "totalDealSize": { - "description": "Total size of deals in human readable format, i.e. 100 TiB", + "password": { + "description": "Password.", "type": "string" }, - "urlTemplate": { - "description": "URL template with PIECE_CID placeholder for boost to fetch the CAR file, i.e. 
http://127.0.0.1/piece/{PIECE_CID}.car", + "username": { + "description": "Username.", "type": "string" - }, - "verified": { - "description": "Whether the deal should be verified", - "type": "boolean", - "default": true } } }, - "storage.DirEntry": { + "storage.pcloudConfig": { "type": "object", "properties": { - "dirId": { + "authUrl": { + "description": "Auth server URL.", "type": "string" }, - "hash": { + "clientCredentials": { + "description": "Use client credentials OAuth flow.", + "type": "boolean", + "default": false + }, + "clientId": { + "description": "OAuth Client Id.", "type": "string" }, - "isDir": { - "type": "boolean" + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" }, - "lastModified": { + "description": { + "description": "Description of the remote.", "type": "string" }, - "numItems": { - "type": "integer" + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" }, - "path": { + "hostname": { + "description": "Hostname to connect to.", + "type": "string", + "default": "api.pcloud.com", + "example": "api.pcloud.com" + }, + "password": { + "description": "Your pcloud password.", "type": "string" }, - "size": { - "type": "integer" - } - } - }, - "storage.RenameRequest": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { + "rootFolderId": { + "description": "Fill in for rclone to use a non root folder as its starting point.", + "type": "string", + "default": "d0" + }, + "token": { + "description": "OAuth Access Token as a JSON blob.", + "type": "string" + }, + "tokenUrl": { + "description": "Token server url.", + "type": "string" + }, + "username": { + "description": "Your pcloud username.", "type": "string" } } }, - "storage.azureblobConfig": { + "storage.premiumizemeConfig": { "type": "object", "properties": { - "accessTier": { - "description": "Access tier of blob: hot, cool, cold or archive.", + "apiKey": 
{ + "description": "API Key.", "type": "string" }, - "account": { - "description": "Azure Storage Account Name.", + "authUrl": { + "description": "Auth server URL.", "type": "string" }, - "archiveTierDelete": { - "description": "Delete archive tier blobs before overwriting.", + "clientCredentials": { + "description": "Use client credentials OAuth flow.", "type": "boolean", "default": false }, - "chunkSize": { - "description": "Upload chunk size.", - "type": "string", - "default": "4Mi" - }, - "clientCertificatePassword": { - "description": "Password for the certificate file (optional).", - "type": "string" - }, - "clientCertificatePath": { - "description": "Path to a PEM or PKCS12 certificate file including the private key.", - "type": "string" - }, "clientId": { - "description": "The ID of the client in use.", + "description": "OAuth Client Id.", "type": "string" }, "clientSecret": { - "description": "One of the service principal's client secrets", + "description": "OAuth Client Secret.", "type": "string" }, - "clientSendCertificateChain": { - "description": "Send the certificate chain when using certificate auth.", - "type": "boolean", - "default": false - }, - "deleteSnapshots": { - "description": "Set to specify how to deal with snapshots on blob deletion.", - "type": "string", - "example": "" - }, "description": { "description": "Description of the remote.", "type": "string" }, - "directoryMarkers": { - "description": "Upload an empty object with a trailing slash when a new directory is created", - "type": "boolean", - "default": false - }, - "disableChecksum": { - "description": "Don't store MD5 checksum with object metadata.", - "type": "boolean", - "default": false - }, "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": "Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8" + "default": "Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot" }, - "endpoint": { - "description": "Endpoint for the service.", + "token": { + 
"description": "OAuth Access Token as a JSON blob.", "type": "string" }, - "envAuth": { - "description": "Read credentials from runtime (environment variables, CLI or MSI).", - "type": "boolean", - "default": false - }, - "key": { - "description": "Storage Account Shared Key.", + "tokenUrl": { + "description": "Token server url.", + "type": "string" + } + } + }, + "storage.putioConfig": { + "type": "object", + "properties": { + "authUrl": { + "description": "Auth server URL.", "type": "string" }, - "listChunk": { - "description": "Size of blob list.", - "type": "integer", - "default": 5000 - }, - "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed. (no longer used)", - "type": "string", - "default": "1m0s" - }, - "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", + "clientCredentials": { + "description": "Use client credentials OAuth flow.", "type": "boolean", "default": false }, - "msiClientId": { - "description": "Object ID of the user-assigned MSI to use, if any.", + "clientId": { + "description": "OAuth Client Id.", "type": "string" }, - "msiMiResId": { - "description": "Azure resource ID of the user-assigned MSI to use, if any.", + "clientSecret": { + "description": "OAuth Client Secret.", "type": "string" }, - "msiObjectId": { - "description": "Object ID of the user-assigned MSI to use, if any.", + "description": { + "description": "Description of the remote.", "type": "string" }, - "noCheckContainer": { - "description": "If set, don't attempt to check the container exists or create it.", - "type": "boolean", - "default": false + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" }, - "noHeadObject": { - "description": "If set, do not do HEAD before GET when getting objects.", - "type": "boolean", - "default": false + "token": { + "description": "OAuth Access Token 
as a JSON blob.", + "type": "string" }, - "password": { - "description": "The user's password", + "tokenUrl": { + "description": "Token server url.", + "type": "string" + } + } + }, + "storage.qingstorConfig": { + "type": "object", + "properties": { + "accessKeyId": { + "description": "QingStor Access Key ID.", "type": "string" }, - "publicAccess": { - "description": "Public access level of a container: blob or container.", + "chunkSize": { + "description": "Chunk size to use for uploading.", "type": "string", - "example": "" + "default": "4Mi" }, - "sasUrl": { - "description": "SAS URL for container level access only.", + "connectionRetries": { + "description": "Number of connection retries.", + "type": "integer", + "default": 3 + }, + "description": { + "description": "Description of the remote.", "type": "string" }, - "servicePrincipalFile": { - "description": "Path to file containing credentials for use with a service principal.", + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,Ctl,InvalidUtf8" + }, + "endpoint": { + "description": "Enter an endpoint URL to connection QingStor API.", "type": "string" }, - "tenant": { - "description": "ID of the service principal's tenant. 
Also called its directory ID.", + "envAuth": { + "description": "Get QingStor credentials from runtime.", + "type": "boolean", + "default": false, + "example": false + }, + "secretAccessKey": { + "description": "QingStor Secret Access Key (password).", "type": "string" }, "uploadConcurrency": { "description": "Concurrency for multipart uploads.", "type": "integer", - "default": 16 + "default": 1 }, "uploadCutoff": { - "description": "Cutoff for switching to chunked upload (\u003c= 256 MiB) (deprecated).", - "type": "string" - }, - "useEmulator": { - "description": "Uses local storage emulator if provided as 'true'.", - "type": "boolean", - "default": false - }, - "useMsi": { - "description": "Use a managed service identity to authenticate (only works in Azure).", - "type": "boolean", - "default": false + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" }, - "username": { - "description": "User name (usually an email address)", - "type": "string" + "zone": { + "description": "Zone to connect to.", + "type": "string", + "example": "pek3a" } } }, - "storage.b2Config": { + "storage.s3AWSConfig": { "type": "object", "properties": { - "account": { - "description": "Account ID or Application Key ID.", + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" + }, + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", "type": "string" }, + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", + "type": "string", + "example": "private" + }, "chunkSize": { - "description": "Upload chunk size.", + "description": "Chunk size to use for uploading.", "type": "string", - "default": "96Mi" + "default": "5Mi" }, "copyCutoff": { "description": "Cutoff for switching to multipart copy.", "type": "string", - "default": "4Gi" + "default": "4.656Gi" + }, + "decompress": { + "description": "If set this will decompress gzip encoded objects.", + "type": 
"boolean", + "default": false }, "description": { "description": "Description of the remote.", "type": "string" }, + "directoryBucket": { + "description": "Set to use AWS Directory Buckets", + "type": "boolean", + "default": false + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { - "description": "Disable checksums for large (\u003e upload cutoff) files.", + "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", "default": false }, - "downloadAuthDuration": { - "description": "Time before the public link authorization token will expire in s or suffix ms|s|m|h|d.", - "type": "string", - "default": "1w" + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", + "type": "boolean", + "default": false }, "downloadUrl": { "description": "Custom endpoint for downloads.", @@ -7430,26 +13490,53 @@ const docTemplate = `{ "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" + "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for the service.", + "description": "Endpoint for S3 API.", "type": "string" }, - "hardDelete": { - "description": "Permanently delete files on remote removal, otherwise hide files.", + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false + }, + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true + }, + "leavePartsOnError": { + "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.", "type": "boolean", "default": false }, - "key": { - "description": 
"Application Key.", - "type": "string" + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 }, - "lifecycle": { - "description": "Set the number of days deleted files should be kept when creating a bucket.", + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", + "type": "string", + "default": "unset" + }, + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", "type": "integer", "default": 0 }, + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", + "type": "string", + "example": "" + }, + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 + }, "memoryPoolFlushTime": { "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", @@ -7460,2549 +13547,1584 @@ const docTemplate = `{ "type": "boolean", "default": false }, - "testMode": { - "description": "A flag string for X-Bz-Test-Mode header for debugging.", - "type": "string" - }, - "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", - "type": "integer", - "default": 4 - }, - "uploadCutoff": { - "description": "Cutoff for switching to chunked upload.", + "mightGzip": { + "description": "Set this if the backend might gzip objects.", "type": "string", - "default": "200Mi" + "default": "unset" }, - "versionAt": { - "description": "Show file versions as they were at the specified time.", - "type": "string", - "default": "off" + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false }, - "versions": { - "description": "Include old versions in directory listings.", + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", "type": "boolean", "default": 
false - } - } - }, - "storage.boxConfig": { - "type": "object", - "properties": { - "accessToken": { - "description": "Box App Primary Access Token", - "type": "string" }, - "authUrl": { - "description": "Auth server URL.", - "type": "string" + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false }, - "boxConfigFile": { - "description": "Box App config.json location", + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false + }, + "profile": { + "description": "Profile to use in the shared credentials file.", "type": "string" }, - "boxSubType": { + "region": { + "description": "Region to connect to.", "type": "string", - "default": "user", - "example": "user" + "example": "us-east-1" }, - "clientId": { - "description": "OAuth Client Id.", - "type": "string" + "requesterPays": { + "description": "Enables requester pays option when interacting with S3 bucket.", + "type": "boolean", + "default": false }, - "clientSecret": { - "description": "OAuth Client Secret.", + "roleArn": { + "description": "ARN of the IAM role to assume.", "type": "string" }, - "commitRetries": { - "description": "Max number of times to try committing a multipart file.", - "type": "integer", - "default": 100 - }, - "description": { - "description": "Description of the remote.", + "roleExternalId": { + "description": "External ID for assumed role.", "type": "string" }, - "encoding": { - "description": "The encoding for the backend.", - "type": "string", - "default": "Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot" - }, - "impersonate": { - "description": "Impersonate this user ID when using a service account.", + "roleSessionDuration": { + "description": "Session duration for assumed role.", "type": "string" }, - "listChunk": { - "description": "Size of listing chunk 1-1000.", - "type": "integer", - "default": 1000 - }, - "ownedBy": { - 
"description": "Only show items owned by the login (email address) passed in.", + "roleSessionName": { + "description": "Session name for assumed role.", "type": "string" }, - "rootFolderId": { - "description": "Fill in for rclone to use a non root folder as its starting point.", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "default": "0" + "default": "Off" }, - "token": { - "description": "OAuth Access Token as a JSON blob.", + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" }, - "tokenUrl": { - "description": "Token server url.", + "serverSideEncryption": { + "description": "The server-side encryption algorithm used when storing this object in S3.", "type": "string" }, - "uploadCutoff": { - "description": "Cutoff for switching to multipart upload (\u003e= 50 MiB).", - "type": "string", - "default": "50Mi" - } - } - }, - "storage.createAzureblobStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] - }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.azureblobConfig" - } - ] - }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "sessionToken": { + "description": "An AWS session token.", + "type": "string" }, - "path": { - "description": "Path of the storage", + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", "type": "string" - } - } - }, - "storage.createB2StorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.b2Config" - } - ] + 
"signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" }, - "name": { - "description": "Name of the storage, must be unique", + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", "type": "string", - "example": "my-storage" + "example": "" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createBoxStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "sseCustomerKey": { + "description": "To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.", + "type": "string", + "example": "" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.boxConfig" - } - ] + "sseCustomerKeyBase64": { + "description": "If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.", + "type": "string", + "example": "" }, - "name": { - "description": "Name of the storage, must be unique", + "sseCustomerKeyMd5": { + "description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", "type": "string", - "example": "my-storage" + "example": "" }, - "path": { - "description": "Path of the storage", + "sseKmsKeyId": { + "description": "If using KMS ID you must provide the ARN of Key.", + "type": "string", + "example": "" + }, + "storageClass": { + "description": "The storage class to use when storing new objects in S3.", + "type": "string", + "example": "REDUCED_REDUNDANCY" + }, + "stsEndpoint": { + "description": "Endpoint for STS (deprecated).", "type": "string" - } - } - }, - "storage.createDriveStorageRequest": { - "type": "object", - 
"properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.driveConfig" - } - ] + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", + "type": "integer", + "default": 4 }, - "name": { - "description": "Name of the storage, must be unique", + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", "type": "string", - "example": "my-storage" + "default": "200Mi" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createDropboxStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "useAccelerateEndpoint": { + "description": "If true use the AWS S3 accelerated endpoint.", + "type": "boolean", + "default": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.dropboxConfig" - } - ] + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" }, - "name": { - "description": "Name of the storage, must be unique", + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createFichierStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "useArnRegion": { + "description": "If true, enables arn 
region support for the service.", + "type": "boolean", + "default": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.fichierConfig" - } - ] + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" }, - "name": { - "description": "Name of the storage, must be unique", + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createFilefabricStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.filefabricConfig" - } - ] + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createFtpStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" 
- } - ] + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.ftpConfig" - } - ] + "v2Auth": { + "description": "If true use v2 authentication.", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", + "versionAt": { + "description": "Show file versions as they were at the specified time.", "type": "string", - "example": "my-storage" + "default": "off" }, - "path": { - "description": "Path of the storage", - "type": "string" + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false } } }, - "storage.createGcsStorageRequest": { + "storage.s3AlibabaConfig": { "type": "object", "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.gcsConfig" - } - ] + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", "type": "string", - "example": "my-storage" + "example": "private" }, - "path": { - "description": "Path of the storage", + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" + }, + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": 
"4.656Gi" + }, + "decompress": { + "description": "If set this will decompress gzip encoded objects.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", "type": "string" - } - } - }, - "storage.createGphotosStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.gphotosConfig" - } - ] + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false }, - "path": { - "description": "Path of the storage", + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", + "type": "boolean", + "default": false + }, + "downloadUrl": { + "description": "Custom endpoint for downloads.", "type": "string" - } - } - }, - "storage.createHdfsStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.hdfsConfig" - } - ] + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" }, - "name": { - "description": "Name of the storage, must be unique", + "endpoint": { + "description": "Endpoint for S3 API.", "type": "string", - "example": "my-storage" + "example": "oss-accelerate.aliyuncs.com" }, - "path": { - 
"description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createHidriveStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.hidriveConfig" - } - ] + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true }, - "name": { - "description": "Name of the storage, must be unique", + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 + }, + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createHttpStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.httpConfig" - } - ] + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "name": { - "description": "Name of the storage, must be unique", + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be 
flushed. (no longer used)", "type": "string", - "example": "my-storage" + "default": "1m0s" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createInternetarchiveStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", + "type": "boolean", + "default": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.internetarchiveConfig" - } - ] + "mightGzip": { + "description": "Set this if the backend might gzip objects.", + "type": "string", + "default": "unset" + }, + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false + }, + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false }, - "path": { - "description": "Path of the storage", + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false + }, + "profile": { + "description": "Profile to use in the shared credentials file.", "type": "string" - } - } - }, - "storage.createJottacloudStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": 
"#/definitions/storage.jottacloudConfig" - } - ] + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" }, - "path": { - "description": "Path of the storage", + "roleSessionDuration": { + "description": "Session duration for assumed role.", "type": "string" - } - } - }, - "storage.createKoofrDigistorageStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.koofrDigistorageConfig" - } - ] + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "example": "my-storage" + "default": "Off" }, - "path": { - "description": "Path of the storage", + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" - } - } - }, - "storage.createKoofrKoofrStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.koofrKoofrConfig" - } - ] + "sessionToken": { + "description": "An AWS session token.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", + "type": "string" + }, + "signAcceptEncoding": { + 
"description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", + "storageClass": { + "description": "The storage class to use when storing new objects in S3.", "type": "string" - } - } - }, - "storage.createKoofrOtherStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.koofrOtherConfig" - } - ] + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", + "type": "integer", + "default": 4 }, - "name": { - "description": "Name of the storage, must be unique", + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", "type": "string", - "example": "my-storage" + "default": "200Mi" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createLocalStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.localConfig" - } - ] + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" }, - "name": { - "description": "Name of the storage, must be unique", + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": 
false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createMailruStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.mailruConfig" - } - ] + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", + "type": "string", + "default": "unset" }, - "name": { - "description": "Name of the storage, must be unique", + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createMegaStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", + "type": "boolean", + "default": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.megaConfig" - } - ] + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, + 
"v2Auth": { + "description": "If true use v2 authentication.", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", + "versionAt": { + "description": "Show file versions as they were at the specified time.", "type": "string", - "example": "my-storage" + "default": "off" }, - "path": { - "description": "Path of the storage", - "type": "string" + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false } } }, - "storage.createNetstorageStorageRequest": { + "storage.s3ArvanCloudConfig": { "type": "object", "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.netstorageConfig" - } - ] + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", "type": "string", - "example": "my-storage" + "example": "private" }, - "path": { - "description": "Path of the storage", + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" + }, + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" + }, + "decompress": { + "description": "If set this will decompress gzip encoded objects.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", "type": "string" - } - } - }, - 
"storage.createOnedriveStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.onedriveConfig" - } - ] + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false }, - "path": { - "description": "Path of the storage", + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", + "type": "boolean", + "default": false + }, + "downloadUrl": { + "description": "Custom endpoint for downloads.", "type": "string" - } - } - }, - "storage.createOosEnv_authStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.oosEnv_authConfig" - } - ] + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" }, - "name": { - "description": "Name of the storage, must be unique", + "endpoint": { + "description": "Endpoint for S3 API.", "type": "string", - "example": "my-storage" + "example": "s3.ir-thr-at1.arvanstorage.ir" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createOosInstance_principal_authStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying 
HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.oosInstance_principal_authConfig" - } - ] + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true }, - "name": { - "description": "Name of the storage, must be unique", + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 + }, + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createOosNo_authStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.oosNo_authConfig" - } - ] + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", + "type": "string", + "example": "ir-thr-at1" }, - "name": { - "description": "Name of the storage, must be unique", + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 + }, + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. 
(no longer used)", "type": "string", - "example": "my-storage" + "default": "1m0s" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createOosResource_principal_authStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", + "type": "boolean", + "default": false + }, + "mightGzip": { + "description": "Set this if the backend might gzip objects.", + "type": "string", + "default": "unset" + }, + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false + }, + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", + "type": "boolean", + "default": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.oosResource_principal_authConfig" - } - ] + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false }, - "path": { - "description": "Path of the storage", + "profile": { + "description": "Profile to use in the shared credentials file.", "type": "string" - } - } - }, - "storage.createOosUser_principal_authStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - 
{ - "$ref": "#/definitions/storage.oosUser_principal_authConfig" - } - ] + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" }, - "path": { - "description": "Path of the storage", + "roleSessionDuration": { + "description": "Session duration for assumed role.", "type": "string" - } - } - }, - "storage.createOosWorkload_identity_authStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.oosWorkload_identity_authConfig" - } - ] + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "example": "my-storage" + "default": "Off" }, - "path": { - "description": "Path of the storage", + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" - } - } - }, - "storage.createOpendriveStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.opendriveConfig" - } - ] + "sessionToken": { + "description": "An AWS session token.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", + "type": "string" + }, 
+ "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", + "storageClass": { + "description": "The storage class to use when storing new objects in S3.", "type": "string" - } - } - }, - "storage.createPcloudStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.pcloudConfig" - } - ] + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", + "type": "integer", + "default": 4 }, - "name": { - "description": "Name of the storage, must be unique", + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", "type": "string", - "example": "my-storage" - }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createPremiumizemeStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "default": "200Mi" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.premiumizemeConfig" - } - ] + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" }, - "name": { - "description": "Name of the storage, must be unique", + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": 
"string" - } - } - }, - "storage.createPutioStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.putioConfig" - } - ] + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" }, - "name": { - "description": "Name of the storage, must be unique", + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createQingstorStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.qingstorConfig" - } - ] + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", "type": "string", - "example": "my-storage" + "default": "unset" 
}, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3AWSStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3AWSConfig" - } - ] + "v2Auth": { + "description": "If true use v2 authentication.", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", + "versionAt": { + "description": "Show file versions as they were at the specified time.", "type": "string", - "example": "my-storage" + "default": "off" }, - "path": { - "description": "Path of the storage", - "type": "string" + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false } } }, - "storage.createS3AlibabaStorageRequest": { + "storage.s3BizflyCloudConfig": { "type": "object", "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3AlibabaConfig" - } - ] + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", "type": "string", - "example": 
"my-storage" - }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3ArvanCloudStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "example": "private" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3ArvanCloudConfig" - } - ] + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" }, - "name": { - "description": "Name of the storage, must be unique", + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", "type": "string", - "example": "my-storage" + "default": "4.656Gi" }, - "path": { - "description": "Path of the storage", + "decompress": { + "description": "If set this will decompress gzip encoded objects.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", "type": "string" - } - } - }, - "storage.createS3CephStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3CephConfig" - } - ] + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false }, - "path": { - "description": "Path of the storage", + "disableHttp2": { + "description": "Disable usage of http2 for S3 
backends.", + "type": "boolean", + "default": false + }, + "downloadUrl": { + "description": "Custom endpoint for downloads.", "type": "string" - } - } - }, - "storage.createS3ChinaMobileStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3ChinaMobileConfig" - } - ] + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" }, - "name": { - "description": "Name of the storage, must be unique", + "endpoint": { + "description": "Endpoint for S3 API.", "type": "string", - "example": "my-storage" + "example": "hn.ss.bfcplatform.vn" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3CloudflareStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3CloudflareConfig" - } - ] + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true }, - "name": { - "description": "Name of the storage, must be unique", + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 + }, + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", "type": "string", - "example": 
"my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3DigitalOceanStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 + }, + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3DigitalOceanConfig" - } - ] + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", + "type": "string", + "default": "1m0s" }, - "name": { - "description": "Name of the storage, must be unique", + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", + "type": "boolean", + "default": false + }, + "mightGzip": { + "description": "Set this if the backend might gzip objects.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3DreamhostStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3DreamhostConfig" - } - ] + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3GCSStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3GCSConfig" - } - ] + "profile": { + "description": "Profile to use in the shared credentials file.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", + "region": { + "description": "Region to connect to.", "type": 
"string", - "example": "my-storage" + "example": "hn" }, - "path": { - "description": "Path of the storage", + "roleArn": { + "description": "ARN of the IAM role to assume.", "type": "string" - } - } - }, - "storage.createS3HuaweiOBSStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] - }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3HuaweiOBSConfig" - } - ] }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" }, - "path": { - "description": "Path of the storage", + "roleSessionDuration": { + "description": "Session duration for assumed role.", "type": "string" - } - } - }, - "storage.createS3IBMCOSStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3IBMCOSConfig" - } - ] + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "example": "my-storage" + "default": "Off" }, - "path": { - "description": "Path of the storage", + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" - } - } - }, - "storage.createS3IDriveStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { 
- "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3IDriveConfig" - } - ] + "sessionToken": { + "description": "An AWS session token.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", + "type": "string" + }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3IONOSStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", + "type": "integer", + "default": 4 }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3IONOSConfig" - } - ] + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" }, - "name": { - "description": "Name of the storage, must be unique", + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3LeviiaStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" }, - "config": { - 
"description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3LeviiaConfig" - } - ] + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", "type": "string", - "example": "my-storage" - }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3LiaraStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "default": "unset" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3LiaraConfig" - } - ] + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3LinodeStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3LinodeConfig" - } - ] + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part 
uploads", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3LyveCloudStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3LyveCloudConfig" - } - ] + "v2Auth": { + "description": "If true use v2 authentication.", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", + "versionAt": { + "description": "Show file versions as they were at the specified time.", "type": "string", - "example": "my-storage" + "default": "off" }, - "path": { - "description": "Path of the storage", - "type": "string" + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false } } }, - "storage.createS3MagaluStorageRequest": { + "storage.s3CephConfig": { "type": "object", "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3MagaluConfig" - } - ] + "acl": { + 
"description": "Canned ACL used when creating buckets and storing or copying objects.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", "type": "string", - "example": "my-storage" + "example": "private" }, - "path": { - "description": "Path of the storage", + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" + }, + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" + }, + "decompress": { + "description": "If set this will decompress gzip encoded objects.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", "type": "string" - } - } - }, - "storage.createS3MinioStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3MinioConfig" - } - ] + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false + }, + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", + "type": "boolean", + "default": false + }, + "downloadUrl": { + "description": "Custom endpoint for downloads.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", "type": "string", - "example": "my-storage" + "default": "Slash,InvalidUtf8,Dot" }, - "path": { - "description": "Path of the storage", + 
"endpoint": { + "description": "Endpoint for S3 API.", "type": "string" - } - } - }, - "storage.createS3NeteaseStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3NeteaseConfig" - } - ] + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false }, - "name": { - "description": "Name of the storage, must be unique", + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true + }, + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 + }, + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 + }, + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", "type": "string" - } - } - }, - "storage.createS3OtherStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3OtherConfig" - } - ] + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "name": { - "description": 
"Name of the storage, must be unique", + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", - "example": "my-storage" - }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3PetaboxStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "default": "1m0s" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3PetaboxConfig" - } - ] + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", + "mightGzip": { + "description": "Set this if the backend might gzip objects.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false + }, + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", + "type": "boolean", + "default": false + }, + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false + }, + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false + }, + "profile": { + "description": "Profile to use in the shared credentials file.", "type": "string" - } - } - }, - "storage.createS3QiniuStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] 
}, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3QiniuConfig" - } - ] + "region": { + "description": "Region to connect to.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" }, - "path": { - "description": "Path of the storage", + "roleExternalId": { + "description": "External ID for assumed role.", "type": "string" - } - } - }, - "storage.createS3RackCorpStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3RackCorpConfig" - } - ] + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "example": "my-storage" + "default": "Off" }, - "path": { - "description": "Path of the storage", + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" - } - } - }, - "storage.createS3RcloneStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3RcloneConfig" - } - ] + "serverSideEncryption": { + "description": "The server-side encryption algorithm used when storing this object in S3.", + "type": "string" }, - "name": 
{ - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "sessionToken": { + "description": "An AWS session token.", + "type": "string" }, - "path": { - "description": "Path of the storage", + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", "type": "string" - } - } - }, - "storage.createS3ScalewayStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3ScalewayConfig" - } - ] + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" }, - "name": { - "description": "Name of the storage, must be unique", + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", "type": "string", - "example": "my-storage" + "example": "" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3SeaweedFSStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "sseCustomerKey": { + "description": "To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.", + "type": "string", + "example": "" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3SeaweedFSConfig" - } - ] + "sseCustomerKeyBase64": { + "description": "If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.", + "type": "string", + "example": "" + }, + "sseCustomerKeyMd5": { + 
"description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", + "type": "string", + "example": "" }, - "name": { - "description": "Name of the storage, must be unique", + "sseKmsKeyId": { + "description": "If using KMS ID you must provide the ARN of Key.", "type": "string", - "example": "my-storage" + "example": "" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3StackPathStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", + "type": "integer", + "default": 4 }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3StackPathConfig" - } - ] + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" }, - "name": { - "description": "Name of the storage, must be unique", + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3StorjStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3StorjConfig" - } - ] + "useArnRegion": { + "description": "If true, enables arn region support for the 
service.", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3SynologyStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3SynologyConfig" - } - ] + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", + "type": "string", + "default": "unset" }, - "name": { - "description": "Name of the storage, must be unique", + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3TencentCOSStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", + "type": "boolean", + "default": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3TencentCOSConfig" - } - ] + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" }, - 
"name": { - "description": "Name of the storage, must be unique", + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" + "v2Auth": { + "description": "If true use v2 authentication.", + "type": "boolean", + "default": false + }, + "versionAt": { + "description": "Show file versions as they were at the specified time.", + "type": "string", + "default": "off" + }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false } } }, - "storage.createS3WasabiStorageRequest": { + "storage.s3ChinaMobileConfig": { "type": "object", "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3WasabiConfig" - } - ] + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", "type": "string", - "example": "my-storage" + "example": "private" }, - "path": { - "description": "Path of the storage", + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" + }, + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" + }, + "decompress": { + "description": "If set this will decompress gzip encoded objects.", + "type": "boolean", + 
"default": false + }, + "description": { + "description": "Description of the remote.", "type": "string" - } - } - }, - "storage.createSeafileStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.seafileConfig" - } - ] + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false + }, + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", + "type": "boolean", + "default": false }, - "path": { - "description": "Path of the storage", + "downloadUrl": { + "description": "Custom endpoint for downloads.", "type": "string" - } - } - }, - "storage.createSftpStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.sftpConfig" - } - ] + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" }, - "name": { - "description": "Name of the storage, must be unique", + "endpoint": { + "description": "Endpoint for S3 API.", "type": "string", - "example": "my-storage" + "example": "eos-wuxi-1.cmecloud.cn" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createSharefileStorageRequest": { - "type": 
"object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.sharefileConfig" - } - ] + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createSiaStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", + "type": "string", + "default": "unset" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.siaConfig" - } - ] + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 }, - "name": { - "description": "Name of the storage, must be unique", + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", "type": "string", - "example": "my-storage" + "example": "wuxi1" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createSmbStorageRequest": { - "type": "object", - "properties": { 
- "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.smbConfig" - } - ] + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", + "type": "string", + "default": "1m0s" }, - "name": { - "description": "Name of the storage, must be unique", + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", + "type": "boolean", + "default": false + }, + "mightGzip": { + "description": "Set this if the backend might gzip objects.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createStorjExistingStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.storjExistingConfig" - } - ] + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false }, - "path": { - "description": "Path of the storage", + "noSystemMetadata": 
{ + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false + }, + "profile": { + "description": "Profile to use in the shared credentials file.", "type": "string" - } - } - }, - "storage.createStorjNewStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.storjNewConfig" - } - ] + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" }, - "path": { - "description": "Path of the storage", + "roleSessionDuration": { + "description": "Session duration for assumed role.", "type": "string" - } - } - }, - "storage.createSugarsyncStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.sugarsyncConfig" - } - ] + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "example": "my-storage" + "default": "Off" }, - "path": { - "description": "Path of the storage", + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" - } - } - }, - "storage.createSwiftStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying 
HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.swiftConfig" - } - ] + "serverSideEncryption": { + "description": "The server-side encryption algorithm used when storing this object in S3.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "sessionToken": { + "description": "An AWS session token.", + "type": "string" }, - "path": { - "description": "Path of the storage", + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", "type": "string" - } - } - }, - "storage.createUnionStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.unionConfig" - } - ] + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" }, - "name": { - "description": "Name of the storage, must be unique", + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", "type": "string", - "example": "my-storage" + "example": "" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createUptoboxStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "sseCustomerKey": { + "description": "To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.", + "type": "string", + "example": "" }, - 
"config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.uptoboxConfig" - } - ] + "sseCustomerKeyBase64": { + "description": "If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.", + "type": "string", + "example": "" }, - "name": { - "description": "Name of the storage, must be unique", + "sseCustomerKeyMd5": { + "description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", "type": "string", - "example": "my-storage" + "example": "" }, - "path": { - "description": "Path of the storage", + "storageClass": { + "description": "The storage class to use when storing new objects in S3.", "type": "string" - } - } - }, - "storage.createWebdavStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.webdavConfig" - } - ] + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", + "type": "integer", + "default": 4 }, - "name": { - "description": "Name of the storage, must be unique", + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", "type": "string", - "example": "my-storage" + "default": "200Mi" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createYandexStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" }, - "config": { - "description": "config for the storage", - 
"allOf": [ - { - "$ref": "#/definitions/storage.yandexConfig" - } - ] + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" }, - "name": { - "description": "Name of the storage, must be unique", + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createZohoStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.zohoConfig" - } - ] + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", + "type": "string", + "default": "unset" }, - "name": { - "description": "Name of the storage, must be unique", + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.driveConfig": { - "type": "object", - "properties": { - "acknowledgeAbuse": { - "description": "Set to allow files which return cannotDownloadAbusiveFile to be downloaded.", + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, - 
"allowImportNameChange": { - "description": "Allow the filetype to change when uploading Google docs.", + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, + "v2Auth": { + "description": "If true use v2 authentication.", "type": "boolean", "default": false }, - "alternateExport": { - "description": "Deprecated: No longer needed.", + "versionAt": { + "description": "Show file versions as they were at the specified time.", + "type": "string", + "default": "off" + }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", "type": "boolean", "default": false }, - "authOwnerOnly": { - "description": "Only consider files owned by the authenticated user.", + "versions": { + "description": "Include old versions in directory listings.", "type": "boolean", "default": false - }, - "authUrl": { - "description": "Auth server URL.", + } + } + }, + "storage.s3CloudflareConfig": { + "type": "object", + "properties": { + "accessKeyId": { + "description": "AWS Access Key ID.", "type": "string" }, "chunkSize": { - "description": "Upload chunk size.", + "description": "Chunk size to use for uploading.", "type": "string", - "default": "8Mi" - }, - "clientId": { - "description": "Google Application Client Id", - "type": "string" + "default": "5Mi" }, - "clientSecret": { - "description": "OAuth Client Secret.", - "type": "string" + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" }, - "copyShortcutContent": { - "description": "Server side copy contents of shortcuts instead of the shortcut.", + "decompress": { + "description": "If set this will decompress gzip encoded objects.", "type": "boolean", "default": false }, @@ -10010,499 +15132,513 @@ const docTemplate = `{ 
"description": "Description of the remote.", "type": "string" }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false + }, "disableHttp2": { - "description": "Disable drive using http2.", + "description": "Disable usage of http2 for S3 backends.", "type": "boolean", - "default": true + "default": false + }, + "downloadUrl": { + "description": "Custom endpoint for downloads.", + "type": "string" }, "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": "InvalidUtf8" + "default": "Slash,InvalidUtf8,Dot" + }, + "endpoint": { + "description": "Endpoint for S3 API.", + "type": "string" }, "envAuth": { - "description": "Get IAM credentials from runtime (environment variables or instance meta data if no env vars).", + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", "type": "boolean", "default": false, "example": false }, - "exportFormats": { - "description": "Comma separated list of preferred formats for downloading Google docs.", - "type": "string", - "default": "docx,xlsx,pptx,svg" - }, - "fastListBugFix": { - "description": "Work around a bug in Google Drive listing.", + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", "type": "boolean", "default": true }, - "formats": { - "description": "Deprecated: See export_formats.", - "type": "string" - }, - "impersonate": { - "description": "Impersonate this user when using a service account.", - "type": "string" - }, - "importFormats": { - "description": "Comma separated list of preferred formats for uploading Google docs.", - "type": "string" - }, - "keepRevisionForever": { - "description": "Keep new head revision of each file 
forever.", - "type": "boolean", - "default": false - }, "listChunk": { - "description": "Size of listing chunk 100-1000, 0 to disable.", + "description": "Size of listing chunk (response list for each ListObject S3 request).", "type": "integer", "default": 1000 }, - "metadataLabels": { - "description": "Control whether labels should be read or written in metadata.", - "type": "string", - "default": "off", - "example": "off" - }, - "metadataOwner": { - "description": "Control whether owner should be read or written in metadata.", - "type": "string", - "default": "read", - "example": "off" - }, - "metadataPermissions": { - "description": "Control whether permissions should be read or written in metadata.", + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", "type": "string", - "default": "off", - "example": "off" + "default": "unset" }, - "pacerBurst": { - "description": "Number of API calls to allow without sleeping.", + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", "type": "integer", - "default": 100 - }, - "pacerMinSleep": { - "description": "Minimum time to sleep between API calls.", - "type": "string", - "default": "100ms" - }, - "resourceKey": { - "description": "Resource key for accessing a link-shared file.", - "type": "string" + "default": 0 }, - "rootFolderId": { - "description": "ID of the root folder.", - "type": "string" + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "scope": { - "description": "Comma separated list of scopes that rclone should use when requesting access from drive.", + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. 
(no longer used)", "type": "string", - "example": "drive" + "default": "1m0s" }, - "serverSideAcrossConfigs": { - "description": "Deprecated: use --server-side-across-configs instead.", + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, - "serviceAccountCredentials": { - "description": "Service Account Credentials JSON blob.", - "type": "string" - }, - "serviceAccountFile": { - "description": "Service Account Credentials JSON file path.", - "type": "string" + "mightGzip": { + "description": "Set this if the backend might gzip objects.", + "type": "string", + "default": "unset" }, - "sharedWithMe": { - "description": "Only show files that are shared with me.", + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", "type": "boolean", "default": false }, - "showAllGdocs": { - "description": "Show all Google Docs including non-exportable ones in listings.", + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", "type": "boolean", "default": false }, - "sizeAsQuota": { - "description": "Show sizes as storage quota usage, not actual size.", + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", "type": "boolean", "default": false }, - "skipChecksumGphotos": { - "description": "Skip checksums on Google photos and videos only.", + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", "type": "boolean", "default": false }, - "skipDanglingShortcuts": { - "description": "If set skip dangling shortcut files.", - "type": "boolean", - "default": false + "profile": { + "description": "Profile to use in the shared credentials file.", + "type": "string" }, - "skipGdocs": { - "description": "Skip google documents in all listings.", - "type": "boolean", - "default": false + "region": { + "description": "Region to connect 
to.", + "type": "string", + "example": "auto" }, - "skipShortcuts": { - "description": "If set skip shortcut files.", - "type": "boolean", - "default": false + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" }, - "starredOnly": { - "description": "Only show files that are starred.", - "type": "boolean", - "default": false + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" }, - "stopOnDownloadLimit": { - "description": "Make download limit errors be fatal.", - "type": "boolean", - "default": false + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" }, - "stopOnUploadLimit": { - "description": "Make upload limit errors be fatal.", - "type": "boolean", - "default": false + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, - "teamDrive": { - "description": "ID of the Shared Drive (Team Drive).", + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" }, - "token": { - "description": "OAuth Access Token as a JSON blob.", + "sessionToken": { + "description": "An AWS session token.", "type": "string" }, - "tokenUrl": { - "description": "Token server url.", + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", "type": "string" }, - "trashedOnly": { - "description": "Only show files that are in the trash.", + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", + "type": "integer", + "default": 4 + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" + }, + 
"useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", + "type": "string", + "default": "unset" + }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, - "uploadCutoff": { - "description": "Cutoff for switching to chunked upload.", + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", "type": "string", - "default": "8Mi" + "default": "unset" }, - "useCreatedDate": { - "description": "Use file created date instead of modified date.", + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, + "v2Auth": { + "description": "If true use v2 authentication.", "type": "boolean", "default": false }, - "useSharedDate": { - "description": "Use date file was shared instead of modified date.", + "versionAt": { + "description": "Show file versions as they were at the specified time.", + "type": "string", + "default": "off" + }, + "versionDeleted": { 
+ "description": "Show deleted file markers when using versions.", "type": "boolean", "default": false }, - "useTrash": { - "description": "Send files to the trash instead of deleting permanently.", + "versions": { + "description": "Include old versions in directory listings.", "type": "boolean", - "default": true - }, - "v2DownloadMinSize": { - "description": "If Object's are greater, use drive v2 API to download.", - "type": "string", - "default": "off" + "default": false } } }, - "storage.dropboxConfig": { + "storage.s3CubbitConfig": { "type": "object", "properties": { - "authUrl": { - "description": "Auth server URL.", + "accessKeyId": { + "description": "AWS Access Key ID.", "type": "string" }, - "batchCommitTimeout": { - "description": "Max time to wait for a batch to finish committing", - "type": "string", - "default": "10m0s" - }, - "batchMode": { - "description": "Upload file batching sync|async|off.", - "type": "string", - "default": "sync" - }, - "batchSize": { - "description": "Max number of files in upload batch.", - "type": "integer", - "default": 0 + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", + "type": "string" }, - "batchTimeout": { - "description": "Max time to allow an idle upload batch before uploading.", + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", "type": "string", - "default": "0s" + "example": "private" }, "chunkSize": { - "description": "Upload chunk size (\u003c 150Mi).", + "description": "Chunk size to use for uploading.", "type": "string", - "default": "48Mi" + "default": "5Mi" }, - "clientId": { - "description": "OAuth Client Id.", - "type": "string" + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" }, - "clientSecret": { - "description": "OAuth Client Secret.", - "type": "string" + "decompress": { + "description": "If set this will decompress gzip encoded objects.", + "type": 
"boolean", + "default": false }, "description": { "description": "Description of the remote.", "type": "string" }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false + }, + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", + "type": "boolean", + "default": false + }, + "downloadUrl": { + "description": "Custom endpoint for downloads.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": "Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot" + "default": "Slash,InvalidUtf8,Dot" }, - "impersonate": { - "description": "Impersonate this user when using a business account.", - "type": "string" + "endpoint": { + "description": "Endpoint for S3 API.", + "type": "string", + "example": "s3.cubbit.eu" }, - "pacerMinSleep": { - "description": "Minimum time to sleep between API calls.", + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false + }, + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true + }, + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 + }, + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", "type": "string", - "default": "10ms" + "default": "unset" }, - "rootNamespace": { - "description": "Specify a different Dropbox namespace ID to use as the root for all paths.", - "type": "string" + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": 
"integer", + "default": 0 }, - "sharedFiles": { - "description": "Instructs rclone to work on individual shared files.", + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 + }, + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", + "type": "string", + "default": "1m0s" + }, + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, - "sharedFolders": { - "description": "Instructs rclone to work on shared folders.", + "mightGzip": { + "description": "Set this if the backend might gzip objects.", + "type": "string", + "default": "unset" + }, + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", "type": "boolean", "default": false }, - "token": { - "description": "OAuth Access Token as a JSON blob.", - "type": "string" + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", + "type": "boolean", + "default": false }, - "tokenUrl": { - "description": "Token server url.", - "type": "string" - } - } - }, - "storage.fichierConfig": { - "type": "object", - "properties": { - "apiKey": { - "description": "Your API Key, get it from https://1fichier.com/console/params.pl.", - "type": "string" + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false }, - "cdn": { - "description": "Set if you wish to use CDN download links.", + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", "type": "boolean", "default": false }, - "description": { - "description": "Description of the remote.", + "profile": { + "description": "Profile to use in the shared credentials file.", "type": "string" }, - "encoding": { - "description": "The encoding for the backend.", 
+ "region": { + "description": "Region to connect to.", "type": "string", - "default": "Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot" + "example": "eu-west-1" }, - "filePassword": { - "description": "If you want to download a shared file that is password protected, add this parameter.", + "roleArn": { + "description": "ARN of the IAM role to assume.", "type": "string" }, - "folderPassword": { - "description": "If you want to list the files in a shared folder that is password protected, add this parameter.", + "roleExternalId": { + "description": "External ID for assumed role.", "type": "string" }, - "sharedFolder": { - "description": "If you want to download a shared folder, add this parameter.", - "type": "string" - } - } - }, - "storage.filefabricConfig": { - "type": "object", - "properties": { - "description": { - "description": "Description of the remote.", + "roleSessionDuration": { + "description": "Session duration for assumed role.", "type": "string" }, - "encoding": { - "description": "The encoding for the backend.", - "type": "string", - "default": "Slash,Del,Ctl,InvalidUtf8,Dot" - }, - "permanentToken": { - "description": "Permanent Authentication Token.", + "roleSessionName": { + "description": "Session name for assumed role.", "type": "string" }, - "rootFolderId": { - "description": "ID of the root folder.", - "type": "string" + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" }, - "token": { - "description": "Session Token.", + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" }, - "tokenExpiry": { - "description": "Token expiry time.", + "sessionToken": { + "description": "An AWS session token.", "type": "string" }, - "url": { - "description": "URL of the Enterprise File Fabric to connect to.", - "type": "string", - "example": "https://storagemadeeasy.com" - }, - "version": { - "description": "Version 
read from the file fabric.", + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", "type": "string" - } - } - }, - "storage.ftpConfig": { - "type": "object", - "properties": { - "askPassword": { - "description": "Allow asking for FTP password when needed.", - "type": "boolean", - "default": false }, - "closeTimeout": { - "description": "Maximum time to wait for a response to close.", + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "default": "1m0s" + "default": "unset" }, - "concurrency": { - "description": "Maximum number of FTP simultaneous connections, 0 for unlimited.", + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", "type": "integer", - "default": 0 + "default": 4 }, - "description": { - "description": "Description of the remote.", - "type": "string" + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" }, - "disableEpsv": { - "description": "Disable using EPSV even if server advertises support.", - "type": "boolean", - "default": false + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" }, - "disableMlsd": { - "description": "Disable using MLSD even if server advertises support.", - "type": "boolean", - "default": false + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" }, - "disableTls13": { - "description": "Disable TLS 1.3 (workaround for FTP servers with buggy TLS)", + "useArnRegion": { + "description": "If true, enables arn region support for the service.", "type": "boolean", "default": false }, - "disableUtf8": { - "description": "Disable using UTF-8 even if server advertises support.", + 
"useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", "default": false }, - "encoding": { - "description": "The encoding for the backend.", + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", "type": "string", - "default": "Slash,Del,Ctl,RightSpace,Dot", - "example": "Asterisk,Ctl,Dot,Slash" + "default": "unset" }, - "explicitTls": { - "description": "Use Explicit FTPS (FTP over TLS).", - "type": "boolean", - "default": false + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" }, - "forceListHidden": { - "description": "Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD.", + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, - "host": { - "description": "FTP host to connect to.", - "type": "string" + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" }, - "idleTimeout": { - "description": "Max time before closing idle connections.", + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", "type": "string", - "default": "1m0s" + "default": "unset" }, - "noCheckCertificate": { - "description": "Do not verify the TLS certificate of the server.", + "v2Auth": { + "description": "If true use v2 authentication.", "type": "boolean", "default": false }, - "pass": { - "description": "FTP password.", - "type": "string" - }, - "port": { - "description": "FTP port number.", - "type": "integer", - "default": 21 - }, - "shutTimeout": { - "description": "Maximum time to wait for data connection closing 
status.", + "versionAt": { + "description": "Show file versions as they were at the specified time.", "type": "string", - "default": "1m0s" - }, - "socksProxy": { - "description": "Socks 5 proxy host.", - "type": "string" + "default": "off" }, - "tls": { - "description": "Use Implicit FTPS (FTP over TLS).", + "versionDeleted": { + "description": "Show deleted file markers when using versions.", "type": "boolean", "default": false }, - "tlsCacheSize": { - "description": "Size of TLS session cache for all control and data connections.", - "type": "integer", - "default": 32 - }, - "user": { - "description": "FTP username.", - "type": "string", - "default": "$USER" - }, - "writingMdtm": { - "description": "Use MDTM to set modification time (VsFtpd quirk)", + "versions": { + "description": "Include old versions in directory listings.", "type": "boolean", "default": false } } }, - "storage.gcsConfig": { + "storage.s3DigitalOceanConfig": { "type": "object", "properties": { - "anonymous": { - "description": "Access public buckets and objects without credentials.", - "type": "boolean", - "default": false + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" }, - "authUrl": { - "description": "Auth server URL.", + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", "type": "string" }, "bucketAcl": { - "description": "Access Control List for new buckets.", + "description": "Canned ACL used when creating buckets.", "type": "string", - "example": "authenticatedRead" - }, - "bucketPolicyOnly": { - "description": "Access checks should use bucket-level IAM policies.", - "type": "boolean", - "default": false + "example": "private" }, - "clientId": { - "description": "OAuth Client Id.", - "type": "string" + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" }, - "clientSecret": { - "description": "OAuth Client Secret.", - "type": "string" + "copyCutoff": { + 
"description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" }, "decompress": { "description": "If set this will decompress gzip encoded objects.", @@ -10518,748 +15654,779 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false + }, + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", + "type": "boolean", + "default": false + }, + "downloadUrl": { + "description": "Custom endpoint for downloads.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": "Slash,CrLf,InvalidUtf8,Dot" + "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for the service.", - "type": "string" + "description": "Endpoint for S3 API.", + "type": "string", + "example": "syd1.digitaloceanspaces.com" }, "envAuth": { - "description": "Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars).", + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", "type": "boolean", "default": false, "example": false }, - "location": { - "description": "Location for the newly created buckets.", + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true + }, + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 + }, + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", "type": "string", - "example": "" + "default": "unset" + }, + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 + }, + "locationConstraint": { + "description": "Location 
constraint - must be set to match the Region.", + "type": "string" + }, + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 + }, + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", + "type": "string", + "default": "1m0s" + }, + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", + "type": "boolean", + "default": false + }, + "mightGzip": { + "description": "Set this if the backend might gzip objects.", + "type": "string", + "default": "unset" }, "noCheckBucket": { "description": "If set, don't attempt to check the bucket exists or create it.", "type": "boolean", "default": false }, - "objectAcl": { - "description": "Access Control List for new objects.", - "type": "string", - "example": "authenticatedRead" + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", + "type": "boolean", + "default": false }, - "projectNumber": { - "description": "Project number.", + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false + }, + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false + }, + "profile": { + "description": "Profile to use in the shared credentials file.", "type": "string" }, - "serviceAccountCredentials": { - "description": "Service Account Credentials JSON blob.", + "region": { + "description": "Region to connect to.", "type": "string" }, - "serviceAccountFile": { - "description": "Service Account Credentials JSON file path.", + "roleArn": { + "description": "ARN of the IAM role to assume.", "type": "string" }, - "storageClass": { - "description": "The storage class to use when storing objects in Google Cloud Storage.", - "type": "string", - "example": "" + 
"roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" }, - "token": { - "description": "OAuth Access Token as a JSON blob.", + "roleSessionDuration": { + "description": "Session duration for assumed role.", "type": "string" }, - "tokenUrl": { - "description": "Token server url.", + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" }, - "userProject": { - "description": "User project.", - "type": "string" - } - } - }, - "storage.gphotosConfig": { - "type": "object", - "properties": { - "authUrl": { - "description": "Auth server URL.", + "sessionToken": { + "description": "An AWS session token.", "type": "string" }, - "batchCommitTimeout": { - "description": "Max time to wait for a batch to finish committing", - "type": "string", - "default": "10m0s" + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", + "type": "string" }, - "batchMode": { - "description": "Upload file batching sync|async|off.", + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "default": "sync" + "default": "unset" }, - "batchSize": { - "description": "Max number of files in upload batch.", + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", "type": "integer", - "default": 0 + "default": 4 }, - "batchTimeout": { - "description": "Max time to allow an idle upload batch before uploading.", + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", "type": "string", - "default": "0s" + "default": "200Mi" }, - "clientId": { - "description": "OAuth Client Id.", - "type": "string" + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + 
`Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" }, - "clientSecret": { - "description": "OAuth Client Secret.", - "type": "string" + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" }, - "description": { - "description": "Description of the remote.", - "type": "string" + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false }, - "encoding": { - "description": "The encoding for the backend.", + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", "type": "string", - "default": "Slash,CrLf,InvalidUtf8,Dot" + "default": "unset" }, - "includeArchived": { - "description": "Also view and download archived media.", + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", "default": false }, - "readOnly": { - "description": "Set to make the Google Photos backend read only.", + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", + "type": "string", + "default": "unset" + }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, - "readSize": { - "description": "Set to read the size of media items.", + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, + "v2Auth": { + "description": "If true use v2 authentication.", "type": "boolean", "default": false }, - 
"startYear": { - "description": "Year limits the photos to be downloaded to those which are uploaded after the given year.", - "type": "integer", - "default": 2000 + "versionAt": { + "description": "Show file versions as they were at the specified time.", + "type": "string", + "default": "off" }, - "token": { - "description": "OAuth Access Token as a JSON blob.", - "type": "string" + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false }, - "tokenUrl": { - "description": "Token server url.", - "type": "string" + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false } } }, - "storage.hdfsConfig": { + "storage.s3DreamhostConfig": { "type": "object", "properties": { - "dataTransferProtection": { - "description": "Kerberos data transfer protection: authentication|integrity|privacy.", - "type": "string", - "example": "privacy" - }, - "description": { - "description": "Description of the remote.", - "type": "string" - }, - "encoding": { - "description": "The encoding for the backend.", - "type": "string", - "default": "Slash,Colon,Del,Ctl,InvalidUtf8,Dot" - }, - "namenode": { - "description": "Hadoop name nodes and ports.", + "accessKeyId": { + "description": "AWS Access Key ID.", "type": "string" }, - "servicePrincipalName": { - "description": "Kerberos service principal name for the namenode.", + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", "type": "string" }, - "username": { - "description": "Hadoop user name.", + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", "type": "string", - "example": "root" - } - } - }, - "storage.hidriveConfig": { - "type": "object", - "properties": { - "authUrl": { - "description": "Auth server URL.", - "type": "string" + "example": "private" }, "chunkSize": { - "description": "Chunksize for chunked uploads.", + "description": 
"Chunk size to use for uploading.", "type": "string", - "default": "48Mi" + "default": "5Mi" }, - "clientId": { - "description": "OAuth Client Id.", - "type": "string" + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" }, - "clientSecret": { - "description": "OAuth Client Secret.", - "type": "string" + "decompress": { + "description": "If set this will decompress gzip encoded objects.", + "type": "boolean", + "default": false }, "description": { "description": "Description of the remote.", "type": "string" }, - "disableFetchingMemberCount": { - "description": "Do not fetch number of objects in directories unless it is absolutely necessary.", + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", "default": false }, + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", + "type": "boolean", + "default": false + }, + "downloadUrl": { + "description": "Custom endpoint for downloads.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": "Slash,Dot" + "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for the service.", + "description": "Endpoint for S3 API.", "type": "string", - "default": "https://api.hidrive.strato.com/2.1" + "example": "objects-us-east-1.dream.io" }, - "rootPrefix": { - "description": "The root/parent folder for all paths.", - "type": "string", - "default": "/", - "example": "/" + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false }, - "scopeAccess": { - "description": "Access permissions that rclone should use when 
requesting access from HiDrive.", - "type": "string", - "default": "rw", - "example": "rw" + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true }, - "scopeRole": { - "description": "User-level that rclone should use when requesting access from HiDrive.", + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 + }, + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", "type": "string", - "default": "user", - "example": "user" + "default": "unset" }, - "token": { - "description": "OAuth Access Token as a JSON blob.", - "type": "string" + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 }, - "tokenUrl": { - "description": "Token server url.", + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", "type": "string" }, - "uploadConcurrency": { - "description": "Concurrency for chunked uploads.", + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", "type": "integer", - "default": 4 + "default": 10000 }, - "uploadCutoff": { - "description": "Cutoff/Threshold for chunked uploads.", + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", - "default": "96Mi" - } - } - }, - "storage.httpConfig": { - "type": "object", - "properties": { - "description": { - "description": "Description of the remote.", - "type": "string" + "default": "1m0s" + }, + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", + "type": "boolean", + "default": false }, - "headers": { - "description": "Set HTTP headers for all transactions.", - "type": "string" + "mightGzip": { + "description": "Set this if the backend might gzip objects.", + "type": "string", + "default": "unset" }, - "noEscape": { - "description": "Do not escape URL metacharacters in path names.", + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", "type": "boolean", "default": false }, "noHead": { - "description": "Don't use HEAD requests.", + "description": "If set, don't HEAD uploaded objects to check integrity.", "type": "boolean", "default": false }, - "noSlash": { - "description": "Set this if the site doesn't end directories with /.", + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", "type": "boolean", "default": false }, - "url": { - "description": "URL of HTTP host to connect to.", + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false + }, + "profile": { + "description": "Profile to use in the shared credentials file.", "type": "string" - } - } - }, - "storage.internetarchiveConfig": { - "type": "object", - "properties": { - "accessKeyId": { - "description": "IAS3 Access Key.", + }, + "region": { + "description": "Region to connect to.", "type": "string" }, - "description": { - "description": "Description of the remote.", + "roleArn": { + "description": "ARN of the IAM role to assume.", "type": "string" }, - "disableChecksum": { - "description": "Don't ask the server to test against MD5 checksum calculated by rclone.", - "type": "boolean", - "default": true + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" }, - "encoding": { - "description": "The encoding for the backend.", - "type": "string", - "default": "Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot" + "roleSessionDuration": { + 
"description": "Session duration for assumed role.", + "type": "string" }, - "endpoint": { - "description": "IAS3 Endpoint.", - "type": "string", - "default": "https://s3.us.archive.org" + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, - "frontEndpoint": { - "description": "Host of InternetArchive Frontend.", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "default": "https://archive.org" + "default": "Off" }, "secretAccessKey": { - "description": "IAS3 Secret Key (password).", + "description": "AWS Secret Access Key (password).", "type": "string" }, - "waitArchive": { - "description": "Timeout for waiting the server's processing tasks (specifically archive and book_op) to finish.", - "type": "string", - "default": "0s" - } - } - }, - "storage.jottacloudConfig": { - "type": "object", - "properties": { - "authUrl": { - "description": "Auth server URL.", + "sessionToken": { + "description": "An AWS session token.", "type": "string" }, - "clientId": { - "description": "OAuth Client Id.", + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", "type": "string" }, - "clientSecret": { - "description": "OAuth Client Secret.", - "type": "string" + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" }, - "description": { - "description": "Description of the remote.", - "type": "string" + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", + "type": "integer", + "default": 4 }, - "encoding": { - "description": "The encoding for the backend.", + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", "type": "string", - "default": "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot" + "default": "200Mi" }, - "hardDelete": { - "description": "Delete files permanently rather than putting 
them into the trash.", + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", "type": "boolean", "default": false }, - "md5MemoryLimit": { - "description": "Files bigger than this will be cached on disk to calculate the MD5 if required.", + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", "type": "string", - "default": "10Mi" + "default": "unset" }, - "noVersions": { - "description": "Avoid server side versioning by deleting files and recreating files instead of overwriting them.", + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", "default": false }, - "token": { - "description": "OAuth Access Token as a JSON blob.", - "type": "string" + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", + "type": "string", + "default": "unset" }, - "tokenUrl": { - "description": "Token server url.", - "type": "string" + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" }, - "trashedOnly": { - "description": "Only show files that are in the trash.", + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, - "uploadResumeLimit": { - "description": "Files bigger than this can be resumed if the upload fail's.", + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", "type": "string", - "default": "10Mi" - } - } - }, - 
"storage.koofrDigistorageConfig": { - "type": "object", - "properties": { - "description": { - "description": "Description of the remote.", - "type": "string" + "default": "unset" }, - "encoding": { - "description": "The encoding for the backend.", + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", "type": "string", - "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" + "default": "unset" }, - "mountid": { - "description": "Mount ID of the mount to use.", - "type": "string" + "v2Auth": { + "description": "If true use v2 authentication.", + "type": "boolean", + "default": false }, - "password": { - "description": "Your password for rclone generate one at https://storage.rcs-rds.ro/app/admin/preferences/password.", - "type": "string" + "versionAt": { + "description": "Show file versions as they were at the specified time.", + "type": "string", + "default": "off" }, - "setmtime": { - "description": "Does the backend support setting modification time.", + "versionDeleted": { + "description": "Show deleted file markers when using versions.", "type": "boolean", - "default": true + "default": false }, - "user": { - "description": "Your user name.", - "type": "string" + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false } } }, - "storage.koofrKoofrConfig": { + "storage.s3ExabaConfig": { "type": "object", "properties": { - "description": { - "description": "Description of the remote.", + "accessKeyId": { + "description": "AWS Access Key ID.", "type": "string" }, - "encoding": { - "description": "The encoding for the backend.", + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", + "type": "string" + }, + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", "type": "string", - "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" + "example": "private" }, - "mountid": { - "description": "Mount ID of the mount to 
use.", - "type": "string" + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" }, - "password": { - "description": "Your password for rclone generate one at https://app.koofr.net/app/admin/preferences/password.", - "type": "string" + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" }, - "setmtime": { - "description": "Does the backend support setting modification time.", + "decompress": { + "description": "If set this will decompress gzip encoded objects.", "type": "boolean", - "default": true + "default": false }, - "user": { - "description": "Your user name.", - "type": "string" - } - } - }, - "storage.koofrOtherConfig": { - "type": "object", - "properties": { "description": { "description": "Description of the remote.", "type": "string" }, - "encoding": { - "description": "The encoding for the backend.", - "type": "string", - "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" - }, - "endpoint": { - "description": "The Koofr API endpoint to use.", - "type": "string" - }, - "mountid": { - "description": "Mount ID of the mount to use.", - "type": "string" - }, - "password": { - "description": "Your password for rclone (generate one at your service's settings page).", - "type": "string" - }, - "setmtime": { - "description": "Does the backend support setting modification time.", - "type": "boolean", - "default": true - }, - "user": { - "description": "Your user name.", - "type": "string" - } - } - }, - "storage.localConfig": { - "type": "object", - "properties": { - "caseInsensitive": { - "description": "Force the filesystem to report itself as case insensitive.", + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", "type": "boolean", "default": false }, - "caseSensitive": { - "description": "Force the filesystem to report itself as case sensitive.", + "disableChecksum": { + 
"description": "Don't store MD5 checksum with object metadata.", "type": "boolean", "default": false }, - "copyLinks": { - "description": "Follow symlinks and copy the pointed to item.", + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", "type": "boolean", "default": false }, - "description": { - "description": "Description of the remote.", + "downloadUrl": { + "description": "Custom endpoint for downloads.", "type": "string" }, "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": "Slash,Dot" + "default": "Slash,InvalidUtf8,Dot" }, - "links": { - "description": "Translate symlinks to/from regular files with a '.rclonelink' extension.", - "type": "boolean", - "default": false + "endpoint": { + "description": "Endpoint for S3 API.", + "type": "string" }, - "noCheckUpdated": { - "description": "Don't check to see if the files change during upload.", + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", "type": "boolean", - "default": false + "default": false, + "example": false }, - "noClone": { - "description": "Disable reflink cloning for server-side copies.", + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", "type": "boolean", - "default": false + "default": true }, - "noPreallocate": { - "description": "Disable preallocation of disk space for transferred files.", - "type": "boolean", - "default": false + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 }, - "noSetModtime": { - "description": "Disable setting modtime.", - "type": "boolean", - "default": false + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", + "type": "string", + "default": "unset" }, - "noSparse": { - "description": "Disable sparse files for multi-thread downloads.", 
+ "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 + }, + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", + "type": "string" + }, + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 + }, + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", + "type": "string", + "default": "1m0s" + }, + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, - "nounc": { - "description": "Disable UNC (long path names) conversion on Windows.", - "type": "boolean", - "default": false, - "example": true + "mightGzip": { + "description": "Set this if the backend might gzip objects.", + "type": "string", + "default": "unset" }, - "oneFileSystem": { - "description": "Don't cross filesystem boundaries (unix/macOS only).", + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", "type": "boolean", "default": false }, - "skipLinks": { - "description": "Don't warn about skipped symlinks.", + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", "type": "boolean", "default": false }, - "timeType": { - "description": "Set what kind of time is returned.", - "type": "string", - "default": "mtime", - "example": "mtime" - }, - "unicodeNormalization": { - "description": "Apply unicode NFC normalization to paths and filenames.", + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", "type": "boolean", "default": false }, - "zeroSizeLinks": { - "description": "Assume the Stat size of links is zero (and read them instead) (deprecated).", + "noSystemMetadata": { + "description": "Suppress setting and reading of system 
metadata", "type": "boolean", "default": false - } - } - }, - "storage.mailruConfig": { - "type": "object", - "properties": { - "authUrl": { - "description": "Auth server URL.", + }, + "profile": { + "description": "Profile to use in the shared credentials file.", "type": "string" }, - "checkHash": { - "description": "What should copy do if file checksum is mismatched or invalid.", - "type": "boolean", - "default": true, - "example": true + "region": { + "description": "Region to connect to.", + "type": "string" }, - "clientId": { - "description": "OAuth Client Id.", + "roleArn": { + "description": "ARN of the IAM role to assume.", "type": "string" }, - "clientSecret": { - "description": "OAuth Client Secret.", + "roleExternalId": { + "description": "External ID for assumed role.", "type": "string" }, - "description": { - "description": "Description of the remote.", + "roleSessionDuration": { + "description": "Session duration for assumed role.", "type": "string" }, - "encoding": { - "description": "The encoding for the backend.", + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "default": "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot" + "default": "Off" }, - "pass": { - "description": "Password.", + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" }, - "quirks": { - "description": "Comma separated list of internal maintenance flags.", + "sessionToken": { + "description": "An AWS session token.", "type": "string" }, - "speedupEnable": { - "description": "Skip full upload if there is another file with same data hash.", - "type": "boolean", - "default": true, - "example": true + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", + "type": "string" }, - "speedupFilePatterns": { - "description": "Comma separated list of file 
name patterns eligible for speedup (put by hash).", + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "default": "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf", - "example": "" + "default": "unset" }, - "speedupMaxDisk": { - "description": "This option allows you to disable speedup (put by hash) for large files.", - "type": "string", - "default": "3Gi", - "example": "0" + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", + "type": "integer", + "default": 4 }, - "speedupMaxMemory": { - "description": "Files larger than the size given below will always be hashed on disk.", + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", "type": "string", - "default": "32Mi", - "example": "0" - }, - "token": { - "description": "OAuth Access Token as a JSON blob.", - "type": "string" + "default": "200Mi" }, - "tokenUrl": { - "description": "Token server url.", - "type": "string" + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" }, - "user": { - "description": "User name (usually email).", - "type": "string" + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" }, - "userAgent": { - "description": "HTTP user agent used internally by client.", - "type": "string" - } - } - }, - "storage.megaConfig": { - "type": "object", - "properties": { - "debug": { - "description": "Output more debug from Mega.", + "useArnRegion": { + "description": "If true, enables arn region support for the service.", "type": "boolean", "default": false }, - "description": { - "description": "Description of the remote.", - "type": "string" - }, - "encoding": { - "description": "The encoding for the backend.", + "useDataIntegrityProtections": { + 
"description": "If true use AWS S3 data integrity protections.", "type": "string", - "default": "Slash,InvalidUtf8,Dot" + "default": "unset" }, - "hardDelete": { - "description": "Delete files permanently rather than putting them into the trash.", + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", "default": false }, - "pass": { - "description": "Password.", - "type": "string" + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", + "type": "string", + "default": "unset" }, - "useHttps": { - "description": "Use HTTPS for transfers.", + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, - "user": { - "description": "User name.", - "type": "string" - } - } - }, - "storage.netstorageConfig": { - "type": "object", - "properties": { - "account": { - "description": "Set the NetStorage account name", - "type": "string" + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" }, - "description": { - "description": "Description of the remote.", - "type": "string" + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" }, - "host": { - "description": "Domain+path of NetStorage host to connect to.", - "type": "string" + "v2Auth": { + "description": "If true use v2 authentication.", + "type": "boolean", + "default": false }, - "protocol": { - "description": "Select between HTTP or HTTPS protocol.", + "versionAt": { + "description": "Show file versions as they were at the specified time.", "type": "string", - "default": "https", - "example": "http" + "default": "off" }, - "secret": { - 
"description": "Set the NetStorage account secret/G2O key for authentication.", - "type": "string" + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false } } }, - "storage.onedriveConfig": { + "storage.s3FileLuConfig": { "type": "object", "properties": { - "accessScopes": { - "description": "Set scopes to be requested by rclone.", - "type": "string", - "default": "Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access", - "example": "Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access" + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" }, - "authUrl": { - "description": "Auth server URL.", + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", "type": "string" }, - "avOverride": { - "description": "Allows download of files the server thinks has a virus.", - "type": "boolean", - "default": false + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", + "type": "string", + "example": "private" }, "chunkSize": { - "description": "Chunk size to upload files with - must be multiple of 320k (327,680 bytes).", + "description": "Chunk size to use for uploading.", "type": "string", - "default": "10Mi" - }, - "clientId": { - "description": "OAuth Client Id.", - "type": "string" + "default": "5Mi" }, - "clientSecret": { - "description": "OAuth Client Secret.", - "type": "string" + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" }, - "delta": { - "description": "If set rclone will use delta listing to implement recursive listings.", + "decompress": { + "description": "If set this will decompress gzip encoded objects.", "type": "boolean", 
"default": false }, @@ -11267,981 +16434,1000 @@ const docTemplate = `{ "description": "Description of the remote.", "type": "string" }, - "disableSitePermission": { - "description": "Disable the request for Sites.Read.All permission.", + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", "type": "boolean", "default": false }, - "driveId": { - "description": "The ID of the drive to use.", - "type": "string" + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false }, - "driveType": { - "description": "The type of the drive (personal | business | documentLibrary).", + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", + "type": "boolean", + "default": false + }, + "downloadUrl": { + "description": "Custom endpoint for downloads.", "type": "string" }, "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot" + "default": "Slash,InvalidUtf8,Dot" }, - "exposeOnenoteFiles": { - "description": "Set to make OneNote files show up in directory listings.", + "endpoint": { + "description": "Endpoint for S3 API.", + "type": "string", + "example": "s5lu.com" + }, + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", "type": "boolean", - "default": false + "default": false, + "example": false }, - "hardDelete": { - "description": "Permanently delete files on removal.", + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", "type": "boolean", - "default": false + "default": true }, - "hashType": { - "description": "Specify the hash in use for the backend.", + "listChunk": { + "description": "Size of listing chunk (response list 
for each ListObject S3 request).", + "type": "integer", + "default": 1000 + }, + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", "type": "string", - "default": "auto", - "example": "auto" + "default": "unset" }, - "linkPassword": { - "description": "Set the password for links created by the link command.", - "type": "string" + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 }, - "linkScope": { - "description": "Set the scope of the links created by the link command.", - "type": "string", - "default": "anonymous", - "example": "anonymous" + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "linkType": { - "description": "Set the type of the links created by the link command.", + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", - "default": "view", - "example": "view" + "default": "1m0s" }, - "listChunk": { - "description": "Size of listing chunk.", - "type": "integer", - "default": 1000 + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", + "type": "boolean", + "default": false }, - "metadataPermissions": { - "description": "Control whether permissions should be read or written in metadata.", + "mightGzip": { + "description": "Set this if the backend might gzip objects.", "type": "string", - "default": "off", - "example": "off" + "default": "unset" }, - "noVersions": { - "description": "Remove all versions on modifying operations.", + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false + }, + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", + "type": "boolean", + "default": false + }, + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", "type": "boolean", "default": false }, + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false + }, + "profile": { + "description": "Profile to use in the shared credentials file.", + "type": "string" + }, "region": { - "description": "Choose national cloud region for OneDrive.", + "description": "Region to connect to.", "type": "string", - "default": "global", "example": "global" }, - "rootFolderId": { - "description": "ID of the root folder.", + "roleArn": { + "description": "ARN of the IAM role to assume.", "type": "string" }, - "serverSideAcrossConfigs": { - "description": "Deprecated: use --server-side-across-configs instead.", - "type": "boolean", - "default": false + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" }, - "token": { - "description": "OAuth Access Token as a JSON blob.", + "roleSessionDuration": { + "description": "Session duration for assumed role.", "type": "string" }, - "tokenUrl": { - "description": "Token server url.", + "roleSessionName": { + "description": "Session name for assumed role.", "type": "string" - } - } - }, - 
"storage.oosEnv_authConfig": { - "type": "object", - "properties": { - "attemptResumeUpload": { - "description": "If true attempt to resume previously started multipart upload for the object.", - "type": "boolean", - "default": false }, - "chunkSize": { - "description": "Chunk size to use for uploading.", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "default": "5Mi" + "default": "Off" }, - "compartment": { - "description": "Object storage compartment OCID", + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" }, - "copyCutoff": { - "description": "Cutoff for switching to multipart copy.", - "type": "string", - "default": "4.656Gi" + "sessionToken": { + "description": "An AWS session token.", + "type": "string" }, - "copyTimeout": { - "description": "Timeout for copy.", + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", + "type": "string" + }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "default": "1m0s" + "default": "unset" }, - "description": { - "description": "Description of the remote.", - "type": "string" + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", + "type": "integer", + "default": 4 }, - "disableChecksum": { - "description": "Don't store MD5 checksum with object metadata.", - "type": "boolean", - "default": false + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" }, - "encoding": { - "description": "The encoding for the backend.", + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", "type": "string", - "default": "Slash,InvalidUtf8,Dot" + "default": "unset" }, - "endpoint": { - "description": "Endpoint for Object storage API.", - "type": "string" + "useAlreadyExists": { + 
"description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" }, - "leavePartsOnError": { - "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", + "useArnRegion": { + "description": "If true, enables arn region support for the service.", "type": "boolean", "default": false }, - "maxUploadParts": { - "description": "Maximum number of parts in a multipart upload.", - "type": "integer", - "default": 10000 - }, - "namespace": { - "description": "Object storage namespace", - "type": "string" + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" }, - "noCheckBucket": { - "description": "If set, don't attempt to check the bucket exists or create it.", + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", "default": false }, - "region": { - "description": "Object storage Region", - "type": "string" - }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", "type": "string", - "example": "" + "default": "unset" }, - "sseCustomerKey": { - "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", "type": "string", - "example": "" + "default": "unset" }, - "sseCustomerKeyFile": { - "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", - "type": "string", - "example": "" + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", + 
"type": "boolean", + "default": false }, - "sseCustomerKeySha256": { - "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", "type": "string", - "example": "" + "default": "unset" }, - "sseKmsKeyId": { - "description": "if using your own master key in vault, this header specifies the", + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", "type": "string", - "example": "" + "default": "unset" }, - "storageTier": { - "description": "The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "v2Auth": { + "description": "If true use v2 authentication.", + "type": "boolean", + "default": false + }, + "versionAt": { + "description": "Show file versions as they were at the specified time.", "type": "string", - "default": "Standard", - "example": "Standard" + "default": "off" }, - "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", - "type": "integer", - "default": 10 + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false }, - "uploadCutoff": { - "description": "Cutoff for switching to chunked upload.", - "type": "string", - "default": "200Mi" + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false } } }, - "storage.oosInstance_principal_authConfig": { + "storage.s3FlashBladeConfig": { "type": "object", "properties": { - "attemptResumeUpload": { - "description": "If true attempt to resume previously started multipart upload for the object.", - "type": "boolean", - "default": false + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" }, "chunkSize": { "description": "Chunk size to use for uploading.", "type": 
"string", "default": "5Mi" }, - "compartment": { - "description": "Object storage compartment OCID", - "type": "string" - }, "copyCutoff": { "description": "Cutoff for switching to multipart copy.", "type": "string", "default": "4.656Gi" }, - "copyTimeout": { - "description": "Timeout for copy.", - "type": "string", - "default": "1m0s" + "decompress": { + "description": "If set this will decompress gzip encoded objects.", + "type": "boolean", + "default": false }, "description": { "description": "Description of the remote.", "type": "string" }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", "default": false }, + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", + "type": "boolean", + "default": false + }, + "downloadUrl": { + "description": "Custom endpoint for downloads.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Object storage API.", + "description": "Endpoint for S3 API.", "type": "string" }, - "leavePartsOnError": { - "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", "type": "boolean", - "default": false + "default": false, + "example": false + }, + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true + }, + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 + }, + "listUrlEncode": { + 
"description": "Whether to url encode listings: true/false/unset", + "type": "string", + "default": "unset" + }, + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", "default": 10000 }, - "namespace": { - "description": "Object storage namespace", - "type": "string" + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", + "type": "string", + "default": "1m0s" + }, + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", + "type": "boolean", + "default": false + }, + "mightGzip": { + "description": "Set this if the backend might gzip objects.", + "type": "string", + "default": "unset" }, "noCheckBucket": { "description": "If set, don't attempt to check the bucket exists or create it.", "type": "boolean", "default": false }, - "region": { - "description": "Object storage Region", + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", + "type": "boolean", + "default": false + }, + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false + }, + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false + }, + "profile": { + "description": "Profile to use in the shared credentials file.", "type": "string" }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", - "type": "string", - "example": "" + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" }, - "sseCustomerKey": { - "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key 
to use to", - "type": "string", - "example": "" + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" }, - "sseCustomerKeyFile": { - "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", - "type": "string", - "example": "" + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" }, - "sseCustomerKeySha256": { - "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", - "type": "string", - "example": "" + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, - "sseKmsKeyId": { - "description": "if using your own master key in vault, this header specifies the", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "example": "" + "default": "Off" }, - "storageTier": { - "description": "The storage class to use when storing new objects in storage. 
https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", + "type": "string" + }, + "sessionToken": { + "description": "An AWS session token.", + "type": "string" + }, + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", + "type": "string" + }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "default": "Standard", - "example": "Standard" + "default": "unset" }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", - "default": 10 + "default": 4 }, "uploadCutoff": { "description": "Cutoff for switching to chunked upload.", "type": "string", "default": "200Mi" - } - } - }, - "storage.oosNo_authConfig": { - "type": "object", - "properties": { - "attemptResumeUpload": { - "description": "If true attempt to resume previously started multipart upload for the object.", - "type": "boolean", - "default": false - }, - "chunkSize": { - "description": "Chunk size to use for uploading.", - "type": "string", - "default": "5Mi" - }, - "copyCutoff": { - "description": "Cutoff for switching to multipart copy.", - "type": "string", - "default": "4.656Gi" }, - "copyTimeout": { - "description": "Timeout for copy.", + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", "type": "string", - "default": "1m0s" - }, - "description": { - "description": "Description of the remote.", - "type": "string" - }, - "disableChecksum": { - "description": "Don't store MD5 checksum with object metadata.", - "type": "boolean", - "default": false + "default": "unset" }, - "encoding": { - "description": "The encoding for the backend.", + "useAlreadyExists": { + "description": "Set if rclone should report 
BucketAlreadyExists errors on bucket creation.", "type": "string", - "default": "Slash,InvalidUtf8,Dot" - }, - "endpoint": { - "description": "Endpoint for Object storage API.", - "type": "string" + "default": "unset" }, - "leavePartsOnError": { - "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", + "useArnRegion": { + "description": "If true, enables arn region support for the service.", "type": "boolean", "default": false }, - "maxUploadParts": { - "description": "Maximum number of parts in a multipart upload.", - "type": "integer", - "default": 10000 - }, - "namespace": { - "description": "Object storage namespace", - "type": "string" + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" }, - "noCheckBucket": { - "description": "If set, don't attempt to check the bucket exists or create it.", + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", "default": false }, - "region": { - "description": "Object storage Region", - "type": "string" - }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", "type": "string", - "example": "" + "default": "unset" }, - "sseCustomerKey": { - "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", "type": "string", - "example": "" + "default": "unset" }, - "sseCustomerKeyFile": { - "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", - "type": "string", - "example": "" + "usePresignedRequest": { + 
"description": "Whether to use a presigned request or PutObject for single part uploads", + "type": "boolean", + "default": false }, - "sseCustomerKeySha256": { - "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", "type": "string", - "example": "" + "default": "unset" }, - "sseKmsKeyId": { - "description": "if using your own master key in vault, this header specifies the", + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", "type": "string", - "example": "" + "default": "unset" }, - "storageTier": { - "description": "The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "v2Auth": { + "description": "If true use v2 authentication.", + "type": "boolean", + "default": false + }, + "versionAt": { + "description": "Show file versions as they were at the specified time.", "type": "string", - "default": "Standard", - "example": "Standard" + "default": "off" }, - "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", - "type": "integer", - "default": 10 + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false }, - "uploadCutoff": { - "description": "Cutoff for switching to chunked upload.", - "type": "string", - "default": "200Mi" + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false } } }, - "storage.oosResource_principal_authConfig": { + "storage.s3GCSConfig": { "type": "object", "properties": { - "attemptResumeUpload": { - "description": "If true attempt to resume previously started multipart upload for the object.", - "type": "boolean", - "default": false + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": 
"string" + }, + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", + "type": "string" + }, + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", + "type": "string", + "example": "private" }, "chunkSize": { "description": "Chunk size to use for uploading.", "type": "string", "default": "5Mi" }, - "compartment": { - "description": "Object storage compartment OCID", - "type": "string" - }, "copyCutoff": { "description": "Cutoff for switching to multipart copy.", "type": "string", "default": "4.656Gi" }, - "copyTimeout": { - "description": "Timeout for copy.", - "type": "string", - "default": "1m0s" + "decompress": { + "description": "If set this will decompress gzip encoded objects.", + "type": "boolean", + "default": false }, "description": { "description": "Description of the remote.", "type": "string" }, - "disableChecksum": { - "description": "Don't store MD5 checksum with object metadata.", + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", "type": "boolean", "default": false }, - "encoding": { - "description": "The encoding for the backend.", - "type": "string", - "default": "Slash,InvalidUtf8,Dot" - }, - "endpoint": { - "description": "Endpoint for Object storage API.", - "type": "string" - }, - "leavePartsOnError": { - "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", "default": false }, - "maxUploadParts": { - "description": "Maximum number of parts in a multipart upload.", - "type": "integer", - "default": 10000 - }, - "namespace": { - "description": "Object storage namespace", - "type": "string" - }, - "noCheckBucket": { - "description": "If set, don't attempt to check the bucket exists or create it.", + "disableHttp2": { + 
"description": "Disable usage of http2 for S3 backends.", "type": "boolean", "default": false }, - "region": { - "description": "Object storage Region", + "downloadUrl": { + "description": "Custom endpoint for downloads.", "type": "string" }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", - "type": "string", - "example": "" - }, - "sseCustomerKey": { - "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", - "type": "string", - "example": "" - }, - "sseCustomerKeyFile": { - "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "encoding": { + "description": "The encoding for the backend.", "type": "string", - "example": "" + "default": "Slash,InvalidUtf8,Dot" }, - "sseCustomerKeySha256": { - "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", + "endpoint": { + "description": "Endpoint for S3 API.", "type": "string", - "example": "" + "example": "https://storage.googleapis.com" }, - "sseKmsKeyId": { - "description": "if using your own master key in vault, this header specifies the", - "type": "string", - "example": "" + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false }, - "storageTier": { - "description": "The storage class to use when storing new objects in storage. 
https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", - "type": "string", - "default": "Standard", - "example": "Standard" + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true }, - "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", "type": "integer", - "default": 10 + "default": 1000 }, - "uploadCutoff": { - "description": "Cutoff for switching to chunked upload.", + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", "type": "string", - "default": "200Mi" - } - } - }, - "storage.oosUser_principal_authConfig": { - "type": "object", - "properties": { - "attemptResumeUpload": { - "description": "If true attempt to resume previously started multipart upload for the object.", - "type": "boolean", - "default": false + "default": "unset" }, - "chunkSize": { - "description": "Chunk size to use for uploading.", - "type": "string", - "default": "5Mi" + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 }, - "compartment": { - "description": "Object storage compartment OCID", + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", "type": "string" }, - "configFile": { - "description": "Path to OCI config file", - "type": "string", - "default": "~/.oci/config", - "example": "~/.oci/config" + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "configProfile": { - "description": "Profile name inside the oci config file", + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. 
(no longer used)", "type": "string", - "default": "Default", - "example": "Default" + "default": "1m0s" }, - "copyCutoff": { - "description": "Cutoff for switching to multipart copy.", - "type": "string", - "default": "4.656Gi" + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", + "type": "boolean", + "default": false }, - "copyTimeout": { - "description": "Timeout for copy.", + "mightGzip": { + "description": "Set this if the backend might gzip objects.", "type": "string", - "default": "1m0s" + "default": "unset" }, - "description": { - "description": "Description of the remote.", - "type": "string" + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false }, - "disableChecksum": { - "description": "Don't store MD5 checksum with object metadata.", + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", "type": "boolean", "default": false }, - "encoding": { - "description": "The encoding for the backend.", - "type": "string", - "default": "Slash,InvalidUtf8,Dot" - }, - "endpoint": { - "description": "Endpoint for Object storage API.", - "type": "string" + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false }, - "leavePartsOnError": { - "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", "type": "boolean", "default": false }, - "maxUploadParts": { - "description": "Maximum number of parts in a multipart upload.", - "type": "integer", - "default": 10000 + "profile": { + "description": "Profile to use in the shared credentials file.", + "type": "string" }, - "namespace": { - "description": "Object storage namespace", + "region": { + 
"description": "Region to connect to.", "type": "string" }, - "noCheckBucket": { - "description": "If set, don't attempt to check the bucket exists or create it.", - "type": "boolean", - "default": false + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" }, - "region": { - "description": "Object storage Region", + "roleExternalId": { + "description": "External ID for assumed role.", "type": "string" }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", - "type": "string", - "example": "" + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" }, - "sseCustomerKey": { - "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", - "type": "string", - "example": "" + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, - "sseCustomerKeyFile": { - "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "example": "" + "default": "Off" }, - "sseCustomerKeySha256": { - "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", - "type": "string", - "example": "" + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", + "type": "string" }, - "sseKmsKeyId": { - "description": "if using your own master key in vault, this header specifies the", - "type": "string", - "example": "" + "sessionToken": { + "description": "An AWS session token.", + "type": "string" }, - "storageTier": { - "description": "The storage class to use when storing new objects in storage. 
https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", + "type": "string" + }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "default": "Standard", - "example": "Standard" + "default": "unset" }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", - "default": 10 + "default": 4 }, "uploadCutoff": { "description": "Cutoff for switching to chunked upload.", "type": "string", "default": "200Mi" - } - } - }, - "storage.oosWorkload_identity_authConfig": { - "type": "object", - "properties": { - "attemptResumeUpload": { - "description": "If true attempt to resume previously started multipart upload for the object.", - "type": "boolean", - "default": false - }, - "chunkSize": { - "description": "Chunk size to use for uploading.", - "type": "string", - "default": "5Mi" }, - "compartment": { - "description": "Object storage compartment OCID", - "type": "string" - }, - "copyCutoff": { - "description": "Cutoff for switching to multipart copy.", + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", "type": "string", - "default": "4.656Gi" + "default": "unset" }, - "copyTimeout": { - "description": "Timeout for copy.", + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", "type": "string", - "default": "1m0s" - }, - "description": { - "description": "Description of the remote.", - "type": "string" + "default": "unset" }, - "disableChecksum": { - "description": "Don't store MD5 checksum with object metadata.", + "useArnRegion": { + "description": "If true, enables arn region support for the service.", "type": "boolean", "default": 
false }, - "encoding": { - "description": "The encoding for the backend.", + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", "type": "string", - "default": "Slash,InvalidUtf8,Dot" - }, - "endpoint": { - "description": "Endpoint for Object storage API.", - "type": "string" + "default": "unset" }, - "leavePartsOnError": { - "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", "default": false }, - "maxUploadParts": { - "description": "Maximum number of parts in a multipart upload.", - "type": "integer", - "default": 10000 + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", + "type": "string", + "default": "unset" }, - "namespace": { - "description": "Object storage namespace", - "type": "string" + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" }, - "noCheckBucket": { - "description": "If set, don't attempt to check the bucket exists or create it.", + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, - "region": { - "description": "Object storage Region", - "type": "string" - }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", - "type": "string", - "example": "" - }, - "sseCustomerKey": { - "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", - "type": "string", - "example": "" - }, - "sseCustomerKeyFile": { - "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + 
"useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", "type": "string", - "example": "" + "default": "unset" }, - "sseCustomerKeySha256": { - "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", "type": "string", - "example": "" + "default": "unset" }, - "sseKmsKeyId": { - "description": "if using your own master key in vault, this header specifies the", - "type": "string", - "example": "" + "v2Auth": { + "description": "If true use v2 authentication.", + "type": "boolean", + "default": false }, - "storageTier": { - "description": "The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "versionAt": { + "description": "Show file versions as they were at the specified time.", "type": "string", - "default": "Standard", - "example": "Standard" + "default": "off" }, - "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", - "type": "integer", - "default": 10 + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false }, - "uploadCutoff": { - "description": "Cutoff for switching to chunked upload.", - "type": "string", - "default": "200Mi" + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false } } }, - "storage.opendriveConfig": { + "storage.s3HetznerConfig": { "type": "object", "properties": { - "chunkSize": { - "description": "Files will be uploaded in chunks this size.", - "type": "string", - "default": "10Mi" + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" }, - "description": { - "description": "Description of the remote.", + "acl": { + "description": "Canned ACL used when creating buckets and storing or 
copying objects.", "type": "string" }, - "encoding": { - "description": "The encoding for the backend.", + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", "type": "string", - "default": "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot" + "example": "private" }, - "password": { - "description": "Password.", - "type": "string" + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" }, - "username": { - "description": "Username.", - "type": "string" - } - } - }, - "storage.pcloudConfig": { - "type": "object", - "properties": { - "authUrl": { - "description": "Auth server URL.", - "type": "string" + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" }, - "clientId": { - "description": "OAuth Client Id.", + "decompress": { + "description": "If set this will decompress gzip encoded objects.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", "type": "string" }, - "clientSecret": { - "description": "OAuth Client Secret.", - "type": "string" + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false + }, + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", + "type": "boolean", + "default": false }, - "description": { - "description": "Description of the remote.", + "downloadUrl": { + "description": "Custom endpoint for downloads.", "type": "string" }, "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" + "default": "Slash,InvalidUtf8,Dot" }, - 
"hostname": { - "description": "Hostname to connect to.", + "endpoint": { + "description": "Endpoint for S3 API.", "type": "string", - "default": "api.pcloud.com", - "example": "api.pcloud.com" + "example": "hel1.your-objectstorage.com" }, - "password": { - "description": "Your pcloud password.", - "type": "string" + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false }, - "rootFolderId": { - "description": "Fill in for rclone to use a non root folder as its starting point.", - "type": "string", - "default": "d0" + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true }, - "token": { - "description": "OAuth Access Token as a JSON blob.", - "type": "string" + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 }, - "tokenUrl": { - "description": "Token server url.", - "type": "string" + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", + "type": "string", + "default": "unset" }, - "username": { - "description": "Your pcloud username.", - "type": "string" - } - } - }, - "storage.premiumizemeConfig": { - "type": "object", - "properties": { - "apiKey": { - "description": "API Key.", - "type": "string" + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 }, - "authUrl": { - "description": "Auth server URL.", + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", "type": "string" }, - "clientId": { - "description": "OAuth Client Id.", - "type": "string" + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "clientSecret": { - 
"description": "OAuth Client Secret.", - "type": "string" + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", + "type": "string", + "default": "1m0s" }, - "description": { - "description": "Description of the remote.", - "type": "string" + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", + "type": "boolean", + "default": false }, - "encoding": { - "description": "The encoding for the backend.", + "mightGzip": { + "description": "Set this if the backend might gzip objects.", "type": "string", - "default": "Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot" + "default": "unset" }, - "token": { - "description": "OAuth Access Token as a JSON blob.", - "type": "string" + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false }, - "tokenUrl": { - "description": "Token server url.", + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", + "type": "boolean", + "default": false + }, + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false + }, + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false + }, + "profile": { + "description": "Profile to use in the shared credentials file.", "type": "string" - } - } - }, - "storage.putioConfig": { - "type": "object", - "properties": { - "authUrl": { - "description": "Auth server URL.", + }, + "region": { + "description": "Region to connect to.", + "type": "string", + "example": "hel1" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", "type": "string" }, - "clientId": { - "description": "OAuth Client Id.", + "roleExternalId": { + "description": "External ID for assumed role.", "type": "string" }, - 
"clientSecret": { - "description": "OAuth Client Secret.", + "roleSessionDuration": { + "description": "Session duration for assumed role.", "type": "string" }, - "description": { - "description": "Description of the remote.", + "roleSessionName": { + "description": "Session name for assumed role.", "type": "string" }, - "encoding": { - "description": "The encoding for the backend.", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" + "default": "Off" }, - "token": { - "description": "OAuth Access Token as a JSON blob.", + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" }, - "tokenUrl": { - "description": "Token server url.", + "sessionToken": { + "description": "An AWS session token.", "type": "string" - } - } - }, - "storage.qingstorConfig": { - "type": "object", - "properties": { - "accessKeyId": { - "description": "QingStor Access Key ID.", + }, + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", "type": "string" }, - "chunkSize": { - "description": "Chunk size to use for uploading.", + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "default": "4Mi" + "default": "unset" }, - "connectionRetries": { - "description": "Number of connection retries.", + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", "type": "integer", - "default": 3 + "default": 4 }, - "description": { - "description": "Description of the remote.", - "type": "string" + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" }, - "encoding": { - "description": "The encoding for the backend.", + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", "type": "string", - "default": 
"Slash,Ctl,InvalidUtf8" + "default": "unset" }, - "endpoint": { - "description": "Enter an endpoint URL to connection QingStor API.", - "type": "string" + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" }, - "envAuth": { - "description": "Get QingStor credentials from runtime.", + "useArnRegion": { + "description": "If true, enables arn region support for the service.", "type": "boolean", - "default": false, - "example": false + "default": false }, - "secretAccessKey": { - "description": "QingStor Secret Access Key (password).", - "type": "string" + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" }, - "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", - "type": "integer", - "default": 1 + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false }, - "uploadCutoff": { - "description": "Cutoff for switching to chunked upload.", + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", "type": "string", - "default": "200Mi" + "default": "unset" + }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", + "type": "boolean", + "default": false + }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, + "v2Auth": { + "description": "If true use v2 authentication.", + "type": "boolean", + "default": false + }, + 
"versionAt": { + "description": "Show file versions as they were at the specified time.", + "type": "string", + "default": "off" + }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false }, - "zone": { - "description": "Zone to connect to.", - "type": "string", - "example": "pek3a" + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false } } }, - "storage.s3AWSConfig": { + "storage.s3HuaweiOBSConfig": { "type": "object", "properties": { "accessKeyId": { @@ -12302,7 +17488,8 @@ const docTemplate = `{ }, "endpoint": { "description": "Endpoint for S3 API.", - "type": "string" + "type": "string", + "example": "obs.af-south-1.myhuaweicloud.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -12315,11 +17502,6 @@ const docTemplate = `{ "type": "boolean", "default": true }, - "leavePartsOnError": { - "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.", - "type": "boolean", - "default": false - }, "listChunk": { "description": "Size of listing chunk (response list for each ListObject S3 request).", "type": "integer", @@ -12335,11 +17517,6 @@ const docTemplate = `{ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must be set to match the Region.", - "type": "string", - "example": "" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -12387,12 +17564,23 @@ const docTemplate = `{ "region": { "description": "Region to connect to.", "type": "string", - "example": "us-east-1" + "example": "af-south-1" }, - "requesterPays": { - "description": "Enables requester pays option when interacting with S3 bucket.", - "type": "boolean", - "default": false + "roleArn": { + 
"description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -12403,11 +17591,6 @@ const docTemplate = `{ "description": "AWS Secret Access Key (password).", "type": "string" }, - "serverSideEncryption": { - "description": "The server-side encryption algorithm used when storing this object in S3.", - "type": "string", - "example": "" - }, "sessionToken": { "description": "An AWS session token.", "type": "string" @@ -12416,39 +17599,10 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", - "type": "string", - "example": "" - }, - "sseCustomerKey": { - "description": "To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.", - "type": "string", - "example": "" - }, - "sseCustomerKeyBase64": { - "description": "If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.", - "type": "string", - "example": "" - }, - "sseCustomerKeyMd5": { - "description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", - "type": "string", - "example": "" - }, - "sseKmsKeyId": { - "description": "If using KMS ID you must provide the ARN of Key.", - "type": "string", - "example": "" - }, - "storageClass": { - "description": "The storage class to use when storing new objects in S3.", + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - 
"example": "" - }, - "stsEndpoint": { - "description": "Endpoint for STS (deprecated).", - "type": "string" + "default": "unset" }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", @@ -12460,11 +17614,6 @@ const docTemplate = `{ "type": "string", "default": "200Mi" }, - "useAccelerateEndpoint": { - "description": "If true use the AWS S3 accelerated endpoint.", - "type": "boolean", - "default": false - }, "useAcceptEncodingGzip": { "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", "type": "string", @@ -12475,6 +17624,16 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -12500,6 +17659,11 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -12522,7 +17686,7 @@ const docTemplate = `{ } } }, - "storage.s3AlibabaConfig": { + "storage.s3IBMCOSConfig": { "type": "object", "properties": { "accessKeyId": { @@ -12531,7 +17695,8 @@ const docTemplate = `{ }, "acl": { "description": "Canned ACL used when creating buckets and storing or copying objects.", - "type": "string" + "type": "string", + "example": "private" }, "bucketAcl": { "description": "Canned ACL used when creating buckets.", @@ -12582,9 +17747,9 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for OSS API.", + "description": "Endpoint for S3 API.", "type": 
"string", - "example": "oss-accelerate.aliyuncs.com" + "example": "s3.us.cloud-object-storage.appdomain.cloud" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -12597,6 +17762,14 @@ const docTemplate = `{ "type": "boolean", "default": true }, + "ibmApiKey": { + "description": "IBM API Key to be used to obtain IAM token", + "type": "string" + }, + "ibmResourceInstanceId": { + "description": "IBM service instance id", + "type": "string" + }, "listChunk": { "description": "Size of listing chunk (response list for each ListObject S3 request).", "type": "integer", @@ -12612,6 +17785,11 @@ const docTemplate = `{ "type": "integer", "default": 0 }, + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", + "type": "string", + "example": "us-standard" + }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -12656,6 +17834,26 @@ const docTemplate = `{ "description": "Profile to use in the shared credentials file.", "type": "string" }, + "region": { + "description": "Region to connect to.", + "type": "string" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" + }, "sdkLogMode": { "description": "Set to debug the SDK", "type": "string", @@ -12673,10 +17871,10 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, - "storageClass": { - "description": "The storage class to use when storing new objects in OSS.", + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the 
signature.", "type": "string", - "example": "" + "default": "unset" }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", @@ -12698,6 +17896,16 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -12723,6 +17931,11 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -12745,7 +17958,7 @@ const docTemplate = `{ } } }, - "storage.s3ArvanCloudConfig": { + "storage.s3IDriveConfig": { "type": "object", "properties": { "accessKeyId": { @@ -12804,11 +18017,6 @@ const docTemplate = `{ "type": "string", "default": "Slash,InvalidUtf8,Dot" }, - "endpoint": { - "description": "Endpoint for Arvan Cloud Object Storage (AOS) API.", - "type": "string", - "example": "s3.ir-thr-at1.arvanstorage.ir" - }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", "type": "boolean", @@ -12835,11 +18043,6 @@ const docTemplate = `{ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must match endpoint.", - "type": "string", - "example": "ir-thr-at1" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -12884,6 +18087,22 @@ const docTemplate = `{ "description": "Profile to use in the shared credentials file.", "type": "string" }, + "roleArn": { + 
"description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" + }, "sdkLogMode": { "description": "Set to debug the SDK", "type": "string", @@ -12901,10 +18120,10 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, - "storageClass": { - "description": "The storage class to use when storing new objects in ArvanCloud.", + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "example": "STANDARD" + "default": "unset" }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", @@ -12926,6 +18145,16 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -12951,6 +18180,11 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -12973,7 +18207,7 @@ const docTemplate = `{ } } }, - "storage.s3CephConfig": { + "storage.s3IONOSConfig": { "type": "object", "properties": { "accessKeyId": { @@ -13034,7 +18268,8 @@ const docTemplate = `{ }, "endpoint": { "description": "Endpoint for S3 API.", - "type": 
"string" + "type": "string", + "example": "s3-eu-central-1.ionoscloud.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -13062,10 +18297,6 @@ const docTemplate = `{ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must be set to match the Region.", - "type": "string" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -13113,7 +18344,23 @@ const docTemplate = `{ "region": { "description": "Region to connect to.", "type": "string", - "example": "" + "example": "eu-central-2" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -13124,11 +18371,6 @@ const docTemplate = `{ "description": "AWS Secret Access Key (password).", "type": "string" }, - "serverSideEncryption": { - "description": "The server-side encryption algorithm used when storing this object in S3.", - "type": "string", - "example": "" - }, "sessionToken": { "description": "An AWS session token.", "type": "string" @@ -13137,30 +18379,10 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", - "type": "string", - "example": "" - }, - "sseCustomerKey": { - "description": "To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.", - "type": "string", - "example": "" - }, - "sseCustomerKeyBase64": { - 
"description": "If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.", - "type": "string", - "example": "" - }, - "sseCustomerKeyMd5": { - "description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", - "type": "string", - "example": "" - }, - "sseKmsKeyId": { - "description": "If using KMS ID you must provide the ARN of Key.", + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "example": "" + "default": "unset" }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", @@ -13182,6 +18404,16 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -13207,6 +18439,11 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -13229,7 +18466,7 @@ const docTemplate = `{ } } }, - "storage.s3ChinaMobileConfig": { + "storage.s3IntercoloConfig": { "type": "object", "properties": { "accessKeyId": { @@ -13289,9 +18526,9 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for China Mobile Ecloud Elastic Object Storage (EOS) API.", + "description": "Endpoint for S3 API.", "type": "string", - "example": "eos-wuxi-1.cmecloud.cn" + "example": "de-fra.i3storage.com" }, "envAuth": { "description": "Get AWS 
credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -13319,11 +18556,6 @@ const docTemplate = `{ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must match endpoint.", - "type": "string", - "example": "wuxi1" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -13368,52 +18600,48 @@ const docTemplate = `{ "description": "Profile to use in the shared credentials file.", "type": "string" }, - "sdkLogMode": { - "description": "Set to debug the SDK", + "region": { + "description": "Region to connect to.", "type": "string", - "default": "Off" + "example": "de-fra" }, - "secretAccessKey": { - "description": "AWS Secret Access Key (password).", + "roleArn": { + "description": "ARN of the IAM role to assume.", "type": "string" }, - "serverSideEncryption": { - "description": "The server-side encryption algorithm used when storing this object in S3.", - "type": "string", - "example": "" + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" }, - "sessionToken": { - "description": "An AWS session token.", + "roleSessionDuration": { + "description": "Session duration for assumed role.", "type": "string" }, - "sharedCredentialsFile": { - "description": "Path to the shared credentials file.", + "roleSessionName": { + "description": "Session name for assumed role.", "type": "string" }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "example": "" + "default": "Off" }, - "sseCustomerKey": { - "description": "To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.", - "type": "string", - "example": "" + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", + "type": "string" }, 
- "sseCustomerKeyBase64": { - "description": "If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.", - "type": "string", - "example": "" + "sessionToken": { + "description": "An AWS session token.", + "type": "string" }, - "sseCustomerKeyMd5": { - "description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", - "type": "string", - "example": "" + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", + "type": "string" }, - "storageClass": { - "description": "The storage class to use when storing new objects in ChinaMobile.", + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "example": "" + "default": "unset" }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", @@ -13435,6 +18663,16 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -13460,6 +18698,11 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -13482,13 +18725,17 @@ const docTemplate = `{ } } }, - "storage.s3CloudflareConfig": { + "storage.s3LeviiaConfig": { "type": "object", "properties": { "accessKeyId": { "description": "AWS Access Key ID.", "type": "string" }, + "acl": { + "description": "Canned ACL used when creating buckets and 
storing or copying objects.", + "type": "string" + }, "bucketAcl": { "description": "Canned ACL used when creating buckets.", "type": "string", @@ -13539,7 +18786,8 @@ const docTemplate = `{ }, "endpoint": { "description": "Endpoint for S3 API.", - "type": "string" + "type": "string", + "example": "s3.leviia.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -13613,8 +18861,23 @@ const docTemplate = `{ }, "region": { "description": "Region to connect to.", - "type": "string", - "example": "auto" + "type": "string" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -13633,6 +18896,11 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -13653,6 +18921,16 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -13678,6 +18956,11 @@ const docTemplate = `{ "type": "string", 
"default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -13700,7 +18983,7 @@ const docTemplate = `{ } } }, - "storage.s3DigitalOceanConfig": { + "storage.s3LiaraConfig": { "type": "object", "properties": { "accessKeyId": { @@ -13762,7 +19045,7 @@ const docTemplate = `{ "endpoint": { "description": "Endpoint for S3 API.", "type": "string", - "example": "syd1.digitaloceanspaces.com" + "example": "storage.iran.liara.space" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -13790,10 +19073,6 @@ const docTemplate = `{ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must be set to match the Region.", - "type": "string" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -13838,10 +19117,21 @@ const docTemplate = `{ "description": "Profile to use in the shared credentials file.", "type": "string" }, - "region": { - "description": "Region to connect to.", - "type": "string", - "example": "" + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -13860,6 +19150,15 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, + 
"storageClass": { + "description": "The storage class to use when storing new objects in S3.", + "type": "string" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -13880,6 +19179,16 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -13905,6 +19214,11 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -13927,7 +19241,7 @@ const docTemplate = `{ } } }, - "storage.s3DreamhostConfig": { + "storage.s3LinodeConfig": { "type": "object", "properties": { "accessKeyId": { @@ -13989,7 +19303,7 @@ const docTemplate = `{ "endpoint": { "description": "Endpoint for S3 API.", "type": "string", - "example": "objects-us-east-1.dream.io" + "example": "nl-ams-1.linodeobjects.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -14017,10 +19331,6 @@ const docTemplate = `{ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must be set to match the Region.", - "type": "string" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -14065,10 +19375,21 @@ const docTemplate = `{ "description": "Profile to use in the shared credentials file.", "type": "string" }, - "region": { - "description": "Region to connect 
to.", - "type": "string", - "example": "" + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -14087,6 +19408,11 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -14107,6 +19433,16 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -14132,6 +19468,11 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -14154,7 +19495,7 @@ const docTemplate = `{ } } }, - "storage.s3GCSConfig": { + "storage.s3LyveCloudConfig": { "type": "object", "properties": { "accessKeyId": { @@ -14214,9 +19555,9 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Google Cloud Storage.", + "description": 
"Endpoint for S3 API.", "type": "string", - "example": "https://storage.googleapis.com" + "example": "s3.us-west-1.{account_name}.lyve.seagate.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -14294,8 +19635,23 @@ const docTemplate = `{ }, "region": { "description": "Region to connect to.", - "type": "string", - "example": "" + "type": "string" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -14314,6 +19670,11 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -14334,6 +19695,16 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -14359,6 +19730,11 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": 
"If true use v2 authentication.", "type": "boolean", @@ -14381,7 +19757,7 @@ const docTemplate = `{ } } }, - "storage.s3HuaweiOBSConfig": { + "storage.s3MagaluConfig": { "type": "object", "properties": { "accessKeyId": { @@ -14441,9 +19817,9 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for OBS API.", + "description": "Endpoint for S3 API.", "type": "string", - "example": "obs.af-south-1.myhuaweicloud.com" + "example": "br-se1.magaluobjects.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -14515,10 +19891,21 @@ const docTemplate = `{ "description": "Profile to use in the shared credentials file.", "type": "string" }, - "region": { - "description": "Region to connect to. - the location where your bucket will be created and your data stored. Need bo be same with your endpoint.", - "type": "string", - "example": "af-south-1" + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -14537,6 +19924,15 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, + "storageClass": { + "description": "The storage class to use when storing new objects in S3.", + "type": "string" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -14557,6 +19953,16 @@ const docTemplate = `{ "type": 
"string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -14582,6 +19988,11 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -14604,18 +20015,13 @@ const docTemplate = `{ } } }, - "storage.s3IBMCOSConfig": { + "storage.s3MegaConfig": { "type": "object", "properties": { "accessKeyId": { "description": "AWS Access Key ID.", "type": "string" }, - "acl": { - "description": "Canned ACL used when creating buckets and storing or copying objects.", - "type": "string", - "example": "private" - }, "bucketAcl": { "description": "Canned ACL used when creating buckets.", "type": "string", @@ -14665,9 +20071,9 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for IBM COS S3 API.", + "description": "Endpoint for S3 API.", "type": "string", - "example": "s3.us.cloud-object-storage.appdomain.cloud" + "example": "s3.eu-central-1.s4.mega.io" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -14695,11 +20101,6 @@ const docTemplate = `{ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must match endpoint when using IBM Cloud Public.", - "type": "string", - "example": "us-standard" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -14744,10 +20145,21 @@ 
const docTemplate = `{ "description": "Profile to use in the shared credentials file.", "type": "string" }, - "region": { - "description": "Region to connect to.", - "type": "string", - "example": "" + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -14766,6 +20178,11 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -14786,6 +20203,16 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -14811,6 +20238,11 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -14833,7 +20265,7 @@ const docTemplate = `{ } } }, - "storage.s3IDriveConfig": { + "storage.s3MinioConfig": { "type": "object", "properties": { "accessKeyId": { @@ -14892,6 +20324,10 
@@ const docTemplate = `{ "type": "string", "default": "Slash,InvalidUtf8,Dot" }, + "endpoint": { + "description": "Endpoint for S3 API.", + "type": "string" + }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", "type": "boolean", @@ -14918,6 +20354,10 @@ const docTemplate = `{ "type": "integer", "default": 0 }, + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", + "type": "string" + }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -14962,6 +20402,26 @@ const docTemplate = `{ "description": "Profile to use in the shared credentials file.", "type": "string" }, + "region": { + "description": "Region to connect to.", + "type": "string" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" + }, "sdkLogMode": { "description": "Set to debug the SDK", "type": "string", @@ -14971,13 +20431,47 @@ const docTemplate = `{ "description": "AWS Secret Access Key (password).", "type": "string" }, - "sessionToken": { - "description": "An AWS session token.", - "type": "string" + "serverSideEncryption": { + "description": "The server-side encryption algorithm used when storing this object in S3.", + "type": "string" + }, + "sessionToken": { + "description": "An AWS session token.", + "type": "string" + }, + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", + "type": "string" + }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + 
}, + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", + "type": "string", + "example": "" + }, + "sseCustomerKey": { + "description": "To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.", + "type": "string", + "example": "" + }, + "sseCustomerKeyBase64": { + "description": "If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.", + "type": "string", + "example": "" }, - "sharedCredentialsFile": { - "description": "Path to the shared credentials file.", - "type": "string" + "sseCustomerKeyMd5": { + "description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", + "type": "string", + "example": "" + }, + "sseKmsKeyId": { + "description": "If using KMS ID you must provide the ARN of Key.", + "type": "string", + "example": "" }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", @@ -14999,6 +20493,16 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -15024,6 +20528,11 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -15046,7 +20555,7 @@ const docTemplate = `{ } } }, - "storage.s3IONOSConfig": { + "storage.s3NeteaseConfig": { "type": "object", "properties": { "accessKeyId": { @@ -15106,9 
+20615,8 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for IONOS S3 Object Storage.", - "type": "string", - "example": "s3-eu-central-1.ionoscloud.com" + "description": "Endpoint for S3 API.", + "type": "string" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -15136,6 +20644,10 @@ const docTemplate = `{ "type": "integer", "default": 0 }, + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", + "type": "string" + }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -15181,9 +20693,24 @@ const docTemplate = `{ "type": "string" }, "region": { - "description": "Region where your bucket will be created and your data stored.", - "type": "string", - "example": "de" + "description": "Region to connect to.", + "type": "string" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -15202,6 +20729,11 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -15222,6 +20754,16 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + 
"type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -15247,6 +20789,11 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -15269,7 +20816,7 @@ const docTemplate = `{ } } }, - "storage.s3LeviiaConfig": { + "storage.s3OVHcloudConfig": { "type": "object", "properties": { "accessKeyId": { @@ -15330,7 +20877,8 @@ const docTemplate = `{ }, "endpoint": { "description": "Endpoint for S3 API.", - "type": "string" + "type": "string", + "example": "s3.gra.io.cloud.ovh.net" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -15405,7 +20953,23 @@ const docTemplate = `{ "region": { "description": "Region to connect to.", "type": "string", - "example": "" + "example": "gra" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -15424,6 +20988,11 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "uploadConcurrency": { "description": 
"Concurrency for multipart uploads and copies.", "type": "integer", @@ -15444,6 +21013,16 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -15469,6 +21048,11 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -15491,7 +21075,7 @@ const docTemplate = `{ } } }, - "storage.s3LiaraConfig": { + "storage.s3OtherConfig": { "type": "object", "properties": { "accessKeyId": { @@ -15551,9 +21135,8 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Liara Object Storage API.", - "type": "string", - "example": "storage.iran.liara.space" + "description": "Endpoint for S3 API.", + "type": "string" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -15581,6 +21164,10 @@ const docTemplate = `{ "type": "integer", "default": 0 }, + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", + "type": "string" + }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -15625,6 +21212,26 @@ const docTemplate = `{ "description": "Profile to use in the shared credentials file.", "type": "string" }, + "region": { + "description": "Region to connect to.", + "type": "string" + }, + "roleArn": { + "description": "ARN of the IAM role to 
assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" + }, "sdkLogMode": { "description": "Set to debug the SDK", "type": "string", @@ -15642,10 +21249,10 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, - "storageClass": { - "description": "The storage class to use when storing new objects in Liara", + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "example": "STANDARD" + "default": "unset" }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", @@ -15667,6 +21274,16 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -15692,6 +21309,11 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -15714,7 +21336,7 @@ const docTemplate = `{ } } }, - "storage.s3LinodeConfig": { + "storage.s3OutscaleConfig": { "type": "object", "properties": { "accessKeyId": { @@ -15774,9 +21396,9 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Linode Object Storage 
API.", + "description": "Endpoint for S3 API.", "type": "string", - "example": "us-southeast-1.linodeobjects.com" + "example": "oos.eu-west-2.outscale.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -15848,6 +21470,27 @@ const docTemplate = `{ "description": "Profile to use in the shared credentials file.", "type": "string" }, + "region": { + "description": "Region to connect to.", + "type": "string", + "example": "eu-west-2" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" + }, "sdkLogMode": { "description": "Set to debug the SDK", "type": "string", @@ -15865,6 +21508,11 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -15885,6 +21533,16 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -15910,6 +21568,11 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone 
should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -15932,7 +21595,7 @@ const docTemplate = `{ } } }, - "storage.s3LyveCloudConfig": { + "storage.s3PetaboxConfig": { "type": "object", "properties": { "accessKeyId": { @@ -15994,7 +21657,7 @@ const docTemplate = `{ "endpoint": { "description": "Endpoint for S3 API.", "type": "string", - "example": "s3.us-east-1.lyvecloud.seagate.com" + "example": "s3.petabox.io" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -16022,10 +21685,6 @@ const docTemplate = `{ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must be set to match the Region.", - "type": "string" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -16073,7 +21732,23 @@ const docTemplate = `{ "region": { "description": "Region to connect to.", "type": "string", - "example": "" + "example": "eu-central-1" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -16092,6 +21767,11 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -16112,6 +21792,16 @@ const 
docTemplate = `{ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -16137,6 +21827,11 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -16159,7 +21854,7 @@ const docTemplate = `{ } } }, - "storage.s3MagaluConfig": { + "storage.s3QiniuConfig": { "type": "object", "properties": { "accessKeyId": { @@ -16221,7 +21916,7 @@ const docTemplate = `{ "endpoint": { "description": "Endpoint for S3 API.", "type": "string", - "example": "br-se1.magaluobjects.com" + "example": "s3-cn-east-1.qiniucs.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -16249,6 +21944,11 @@ const docTemplate = `{ "type": "integer", "default": 0 }, + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", + "type": "string", + "example": "cn-east-1" + }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -16293,6 +21993,27 @@ const docTemplate = `{ "description": "Profile to use in the shared credentials file.", "type": "string" }, + "region": { + "description": "Region to connect to.", + "type": "string", + "example": "cn-east-1" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, 
+ "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" + }, "sdkLogMode": { "description": "Set to debug the SDK", "type": "string", @@ -16310,10 +22031,15 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "storageClass": { - "description": "The storage class to use when storing new objects in Magalu.", + "description": "The storage class to use when storing new objects in S3.", "type": "string", - "example": "STANDARD" + "example": "LINE" }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", @@ -16335,6 +22061,16 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -16360,6 +22096,11 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -16382,22 +22123,13 @@ const docTemplate = `{ } } }, - "storage.s3MinioConfig": { + "storage.s3RabataConfig": { "type": "object", "properties": { "accessKeyId": { "description": "AWS Access Key ID.", "type": "string" }, - "acl": { - "description": "Canned ACL used when creating buckets and storing or copying objects.", - 
"type": "string" - }, - "bucketAcl": { - "description": "Canned ACL used when creating buckets.", - "type": "string", - "example": "private" - }, "chunkSize": { "description": "Chunk size to use for uploading.", "type": "string", @@ -16443,7 +22175,8 @@ const docTemplate = `{ }, "endpoint": { "description": "Endpoint for S3 API.", - "type": "string" + "type": "string", + "example": "s3.us-east-1.rabata.io" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -16473,7 +22206,8 @@ const docTemplate = `{ }, "locationConstraint": { "description": "Location constraint - must be set to match the Region.", - "type": "string" + "type": "string", + "example": "us-east-1" }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", @@ -16522,7 +22256,23 @@ const docTemplate = `{ "region": { "description": "Region to connect to.", "type": "string", - "example": "" + "example": "eu-west-1" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -16533,11 +22283,6 @@ const docTemplate = `{ "description": "AWS Secret Access Key (password).", "type": "string" }, - "serverSideEncryption": { - "description": "The server-side encryption algorithm used when storing this object in S3.", - "type": "string", - "example": "" - }, "sessionToken": { "description": "An AWS session token.", "type": "string" @@ -16546,30 +22291,10 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the 
server-side encryption algorithm used when storing this object in S3.", - "type": "string", - "example": "" - }, - "sseCustomerKey": { - "description": "To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.", - "type": "string", - "example": "" - }, - "sseCustomerKeyBase64": { - "description": "If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.", - "type": "string", - "example": "" - }, - "sseCustomerKeyMd5": { - "description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", - "type": "string", - "example": "" - }, - "sseKmsKeyId": { - "description": "If using KMS ID you must provide the ARN of Key.", + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "example": "" + "default": "unset" }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", @@ -16591,6 +22316,16 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -16616,6 +22351,11 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -16638,7 +22378,7 @@ const docTemplate = `{ } } }, - "storage.s3NeteaseConfig": { + "storage.s3RackCorpConfig": { "type": "object", "properties": { "accessKeyId": { @@ -16699,7 +22439,8 @@ const docTemplate = `{ 
}, "endpoint": { "description": "Endpoint for S3 API.", - "type": "string" + "type": "string", + "example": "s3.rackcorp.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -16729,7 +22470,8 @@ const docTemplate = `{ }, "locationConstraint": { "description": "Location constraint - must be set to match the Region.", - "type": "string" + "type": "string", + "example": "global" }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", @@ -16778,7 +22520,23 @@ const docTemplate = `{ "region": { "description": "Region to connect to.", "type": "string", - "example": "" + "example": "global" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -16797,6 +22555,11 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -16817,6 +22580,16 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 
support).", "type": "boolean", @@ -16842,6 +22615,11 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -16864,22 +22642,13 @@ const docTemplate = `{ } } }, - "storage.s3OtherConfig": { + "storage.s3RcloneConfig": { "type": "object", "properties": { "accessKeyId": { "description": "AWS Access Key ID.", "type": "string" }, - "acl": { - "description": "Canned ACL used when creating buckets and storing or copying objects.", - "type": "string" - }, - "bucketAcl": { - "description": "Canned ACL used when creating buckets.", - "type": "string", - "example": "private" - }, "chunkSize": { "description": "Chunk size to use for uploading.", "type": "string", @@ -16953,10 +22722,6 @@ const docTemplate = `{ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must be set to match the Region.", - "type": "string" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -17001,10 +22766,21 @@ const docTemplate = `{ "description": "Profile to use in the shared credentials file.", "type": "string" }, - "region": { - "description": "Region to connect to.", - "type": "string", - "example": "" + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -17023,6 +22799,11 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, + 
"signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -17043,6 +22824,16 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -17068,6 +22859,11 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -17090,7 +22886,7 @@ const docTemplate = `{ } } }, - "storage.s3PetaboxConfig": { + "storage.s3ScalewayConfig": { "type": "object", "properties": { "accessKeyId": { @@ -17150,9 +22946,9 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Petabox S3 Object Storage.", + "description": "Endpoint for S3 API.", "type": "string", - "example": "s3.petabox.io" + "example": "s3.nl-ams.scw.cloud" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -17225,9 +23021,25 @@ const docTemplate = `{ "type": "string" }, "region": { - "description": "Region where your bucket will be created and your data stored.", + "description": "Region to connect to.", "type": "string", - "example": "us-east-1" + "example": "nl-ams" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + 
"roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -17246,6 +23058,16 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, + "storageClass": { + "description": "The storage class to use when storing new objects in S3.", + "type": "string", + "example": "" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -17266,6 +23088,16 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -17291,6 +23123,11 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -17313,7 +23150,7 @@ const docTemplate = `{ } } }, - "storage.s3QiniuConfig": { + "storage.s3SeaweedFSConfig": { "type": "object", "properties": { "accessKeyId": { @@ -17373,9 +23210,9 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Qiniu Object Storage.", + "description": 
"Endpoint for S3 API.", "type": "string", - "example": "s3-cn-east-1.qiniucs.com" + "example": "localhost:8333" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -17405,8 +23242,7 @@ const docTemplate = `{ }, "locationConstraint": { "description": "Location constraint - must be set to match the Region.", - "type": "string", - "example": "cn-east-1" + "type": "string" }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", @@ -17454,8 +23290,23 @@ const docTemplate = `{ }, "region": { "description": "Region to connect to.", - "type": "string", - "example": "cn-east-1" + "type": "string" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -17474,10 +23325,10 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, - "storageClass": { - "description": "The storage class to use when storing new objects in Qiniu.", + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "example": "STANDARD" + "default": "unset" }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", @@ -17499,6 +23350,16 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + 
"default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -17524,6 +23385,11 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -17546,22 +23412,13 @@ const docTemplate = `{ } } }, - "storage.s3RackCorpConfig": { + "storage.s3SelectelConfig": { "type": "object", "properties": { "accessKeyId": { "description": "AWS Access Key ID.", "type": "string" }, - "acl": { - "description": "Canned ACL used when creating buckets and storing or copying objects.", - "type": "string" - }, - "bucketAcl": { - "description": "Canned ACL used when creating buckets.", - "type": "string", - "example": "private" - }, "chunkSize": { "description": "Chunk size to use for uploading.", "type": "string", @@ -17606,9 +23463,9 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for RackCorp Object Storage.", + "description": "Endpoint for S3 API.", "type": "string", - "example": "s3.rackcorp.com" + "example": "s3.ru-1.storage.selcloud.ru" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -17636,11 +23493,6 @@ const docTemplate = `{ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - the location where your bucket will be located and your data stored.", - "type": "string", - "example": "global" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -17686,9 +23538,25 @@ const docTemplate = `{ "type": "string" }, "region": { - "description": "region - the location where your bucket will be created and your data stored.", + "description": "Region to connect to.", 
"type": "string", - "example": "global" + "example": "ru-3" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -17707,6 +23575,11 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -17727,6 +23600,16 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -17752,6 +23635,11 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -17774,17 +23662,13 @@ const docTemplate = `{ } } }, - "storage.s3RcloneConfig": { + "storage.s3ServercoreConfig": { "type": "object", "properties": { "accessKeyId": { "description": "AWS Access Key ID.", "type": "string" }, - "acl": { - "description": "Canned ACL used when creating buckets and storing or copying 
objects.", - "type": "string" - }, "bucketAcl": { "description": "Canned ACL used when creating buckets.", "type": "string", @@ -17835,7 +23719,8 @@ const docTemplate = `{ }, "endpoint": { "description": "Endpoint for S3 API.", - "type": "string" + "type": "string", + "example": "s3.ru-1.storage.selcloud.ru" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -17863,10 +23748,6 @@ const docTemplate = `{ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must be set to match the Region.", - "type": "string" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -17914,7 +23795,23 @@ const docTemplate = `{ "region": { "description": "Region to connect to.", "type": "string", - "example": "" + "example": "uz-2" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -17933,6 +23830,11 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -17953,6 +23855,16 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + 
"useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -17978,6 +23890,11 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -18000,22 +23917,13 @@ const docTemplate = `{ } } }, - "storage.s3ScalewayConfig": { + "storage.s3SpectraLogicConfig": { "type": "object", "properties": { "accessKeyId": { "description": "AWS Access Key ID.", "type": "string" }, - "acl": { - "description": "Canned ACL used when creating buckets and storing or copying objects.", - "type": "string" - }, - "bucketAcl": { - "description": "Canned ACL used when creating buckets.", - "type": "string", - "example": "private" - }, "chunkSize": { "description": "Chunk size to use for uploading.", "type": "string", @@ -18060,9 +23968,8 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Scaleway Object Storage.", - "type": "string", - "example": "s3.nl-ams.scw.cloud" + "description": "Endpoint for S3 API.", + "type": "string" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -18134,10 +24041,21 @@ const docTemplate = `{ "description": "Profile to use in the shared credentials file.", "type": "string" }, - "region": { - "description": "Region to connect to.", - "type": "string", - "example": "nl-ams" + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for 
assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -18156,10 +24074,10 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, - "storageClass": { - "description": "The storage class to use when storing new objects in S3.", + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "example": "" + "default": "unset" }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", @@ -18181,6 +24099,16 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -18206,6 +24134,11 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -18228,7 +24161,7 @@ const docTemplate = `{ } } }, - "storage.s3SeaweedFSConfig": { + "storage.s3StackPathConfig": { "type": "object", "properties": { "accessKeyId": { @@ -18290,7 +24223,7 @@ const docTemplate = `{ "endpoint": { "description": "Endpoint for S3 API.", "type": "string", - "example": "localhost:8333" + "example": "s3.us-east-2.stackpathstorage.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -18318,10 +24251,6 @@ const docTemplate = `{ 
"type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must be set to match the Region.", - "type": "string" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -18368,8 +24297,23 @@ const docTemplate = `{ }, "region": { "description": "Region to connect to.", - "type": "string", - "example": "" + "type": "string" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -18388,6 +24332,11 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -18408,6 +24357,16 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -18433,6 +24392,11 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use 
v2 authentication.", "type": "boolean", @@ -18455,22 +24419,13 @@ const docTemplate = `{ } } }, - "storage.s3StackPathConfig": { + "storage.s3StorjConfig": { "type": "object", "properties": { "accessKeyId": { "description": "AWS Access Key ID.", "type": "string" }, - "acl": { - "description": "Canned ACL used when creating buckets and storing or copying objects.", - "type": "string" - }, - "bucketAcl": { - "description": "Canned ACL used when creating buckets.", - "type": "string", - "example": "private" - }, "chunkSize": { "description": "Chunk size to use for uploading.", "type": "string", @@ -18515,9 +24470,9 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for StackPath Object Storage.", + "description": "Endpoint for S3 API.", "type": "string", - "example": "s3.us-east-2.stackpathstorage.com" + "example": "gateway.storjshare.io" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -18589,10 +24544,21 @@ const docTemplate = `{ "description": "Profile to use in the shared credentials file.", "type": "string" }, - "region": { - "description": "Region to connect to.", - "type": "string", - "example": "" + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -18611,6 +24577,11 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, 
"uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -18631,6 +24602,16 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -18656,6 +24637,11 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -18678,18 +24664,13 @@ const docTemplate = `{ } } }, - "storage.s3StorjConfig": { + "storage.s3SynologyConfig": { "type": "object", "properties": { "accessKeyId": { "description": "AWS Access Key ID.", "type": "string" }, - "bucketAcl": { - "description": "Canned ACL used when creating buckets.", - "type": "string", - "example": "private" - }, "chunkSize": { "description": "Chunk size to use for uploading.", "type": "string", @@ -18734,9 +24715,9 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Storj Gateway.", + "description": "Endpoint for S3 API.", "type": "string", - "example": "gateway.storjshare.io" + "example": "eu-001.s3.synologyc2.net" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -18764,6 +24745,10 @@ const docTemplate = `{ "type": "integer", "default": 0 }, + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", + "type": "string" + }, "maxUploadParts": { "description": "Maximum number of parts in 
a multipart upload.", "type": "integer", @@ -18808,6 +24793,27 @@ const docTemplate = `{ "description": "Profile to use in the shared credentials file.", "type": "string" }, + "region": { + "description": "Region to connect to.", + "type": "string", + "example": "eu-001" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" + }, "sdkLogMode": { "description": "Set to debug the SDK", "type": "string", @@ -18825,6 +24831,11 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -18845,6 +24856,16 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -18870,6 +24891,11 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -18892,13 +24918,18 @@ const docTemplate = `{ } } }, - "storage.s3SynologyConfig": { + 
"storage.s3TencentCOSConfig": { "type": "object", "properties": { "accessKeyId": { "description": "AWS Access Key ID.", "type": "string" }, + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", + "type": "string", + "example": "default" + }, "bucketAcl": { "description": "Canned ACL used when creating buckets.", "type": "string", @@ -18948,9 +24979,9 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Synology C2 Object Storage API.", + "description": "Endpoint for S3 API.", "type": "string", - "example": "eu-001.s3.synologyc2.net" + "example": "cos.ap-beijing.myqcloud.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -18978,10 +25009,6 @@ const docTemplate = `{ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must be set to match the Region.", - "type": "string" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -19026,10 +25053,21 @@ const docTemplate = `{ "description": "Profile to use in the shared credentials file.", "type": "string" }, - "region": { - "description": "Region where your data stored.", - "type": "string", - "example": "eu-001" + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -19048,6 +25086,16 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should 
include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, + "storageClass": { + "description": "The storage class to use when storing new objects in S3.", + "type": "string", + "example": "ARCHIVE" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -19068,6 +25116,16 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -19093,6 +25151,11 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -19115,7 +25178,7 @@ const docTemplate = `{ } } }, - "storage.s3TencentCOSConfig": { + "storage.s3WasabiConfig": { "type": "object", "properties": { "accessKeyId": { @@ -19124,8 +25187,7 @@ const docTemplate = `{ }, "acl": { "description": "Canned ACL used when creating buckets and storing or copying objects.", - "type": "string", - "example": "default" + "type": "string" }, "bucketAcl": { "description": "Canned ACL used when creating buckets.", @@ -19176,9 +25238,9 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Tencent COS API.", + "description": "Endpoint for S3 API.", "type": "string", - "example": "cos.ap-beijing.myqcloud.com" + "example": "s3.wasabisys.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ 
-19206,6 +25268,10 @@ const docTemplate = `{ "type": "integer", "default": 0 }, + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", + "type": "string" + }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -19250,6 +25316,26 @@ const docTemplate = `{ "description": "Profile to use in the shared credentials file.", "type": "string" }, + "region": { + "description": "Region to connect to.", + "type": "string" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" + }, "sdkLogMode": { "description": "Set to debug the SDK", "type": "string", @@ -19267,10 +25353,10 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, - "storageClass": { - "description": "The storage class to use when storing new objects in Tencent COS.", + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "example": "" + "default": "unset" }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", @@ -19292,6 +25378,16 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -19317,6 +25413,11 @@ const 
docTemplate = `{ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -19339,7 +25440,7 @@ const docTemplate = `{ } } }, - "storage.s3WasabiConfig": { + "storage.s3ZataConfig": { "type": "object", "properties": { "accessKeyId": { @@ -19401,7 +25502,7 @@ const docTemplate = `{ "endpoint": { "description": "Endpoint for S3 API.", "type": "string", - "example": "s3.wasabisys.com" + "example": "idr01.zata.ai" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -19480,7 +25581,23 @@ const docTemplate = `{ "region": { "description": "Region to connect to.", "type": "string", - "example": "" + "example": "us-east-1" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -19499,6 +25616,11 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -19519,6 +25641,16 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + 
"description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -19544,6 +25676,11 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -19590,7 +25727,7 @@ const docTemplate = `{ "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": "Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8" + "default": "Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8,Dot" }, "library": { "description": "Name of the library.", @@ -19623,6 +25760,10 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "blake3sumCommand": { + "description": "The command used to read BLAKE3 hashes.", + "type": "string" + }, "chunkSize": { "description": "Upload and download chunk size.", "type": "string", @@ -19647,6 +25788,10 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "crc32sumCommand": { + "description": "The command used to read CRC-32 hashes.", + "type": "string" + }, "description": { "description": "Description of the remote.", "type": "string" @@ -19666,6 +25811,10 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "hashes": { + "description": "Comma separated list of supported checksum types.", + "type": "string" + }, "host": { "description": "SSH host to connect to.", "type": "string" @@ -19674,6 +25823,10 @@ const docTemplate = `{ "description": "Space separated list of host key algorithms, ordered by preference.", "type": "string" }, + "httpProxy": { + "description": "URL for HTTP CONNECT proxy", + "type": "string" + }, "idleTimeout": { "description": "Max time before closing idle connections.", "type": "string", @@ -19710,7 +25863,7 @@ 
const docTemplate = `{ "type": "string" }, "md5sumCommand": { - "description": "The command used to read md5 hashes.", + "description": "The command used to read MD5 hashes.", "type": "string" }, "pass": { @@ -19726,6 +25879,10 @@ const docTemplate = `{ "type": "integer", "default": 22 }, + "pubkey": { + "description": "SSH public certificate for public certificate based authentication.", + "type": "string" + }, "pubkeyFile": { "description": "Optional path to public key file.", "type": "string" @@ -19744,7 +25901,11 @@ const docTemplate = `{ "default": true }, "sha1sumCommand": { - "description": "The command used to read sha1 hashes.", + "description": "The command used to read SHA-1 hashes.", + "type": "string" + }, + "sha256sumCommand": { + "description": "The command used to read SHA-256 hashes.", "type": "string" }, "shellType": { @@ -19785,6 +25946,14 @@ const docTemplate = `{ "description": "SSH username.", "type": "string", "default": "$USER" + }, + "xxh128sumCommand": { + "description": "The command used to read XXH128 hashes.", + "type": "string" + }, + "xxh3sumCommand": { + "description": "The command used to read XXH3 hashes.", + "type": "string" } } }, @@ -19800,6 +25969,11 @@ const docTemplate = `{ "type": "string", "default": "64Mi" }, + "clientCredentials": { + "description": "Use client credentials OAuth flow.", + "type": "boolean", + "default": false + }, "clientId": { "description": "OAuth Client Id.", "type": "string" @@ -19905,6 +26079,10 @@ const docTemplate = `{ "type": "string", "default": "1m0s" }, + "kerberosCcache": { + "description": "Path to the Kerberos credential cache (krb5cc).", + "type": "string" + }, "pass": { "description": "SMB password.", "type": "string" @@ -19918,6 +26096,11 @@ const docTemplate = `{ "description": "Service principal name.", "type": "string" }, + "useKerberos": { + "description": "Use Kerberos authentication.", + "type": "boolean", + "default": false + }, "user": { "description": "SMB username.", "type": 
"string", @@ -20182,32 +26365,14 @@ const docTemplate = `{ } } }, - "storage.uptoboxConfig": { + "storage.webdavConfig": { "type": "object", "properties": { - "accessToken": { - "description": "Your access token.", - "type": "string" - }, - "description": { - "description": "Description of the remote.", - "type": "string" - }, - "encoding": { - "description": "The encoding for the backend.", - "type": "string", - "default": "Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot" - }, - "private": { - "description": "Set to make uploaded files private", + "authRedirect": { + "description": "Preserve authentication on redirect.", "type": "boolean", "default": false - } - } - }, - "storage.webdavConfig": { - "type": "object", - "properties": { + }, "bearerToken": { "description": "Bearer token instead of user/pass (e.g. a Macaroon).", "type": "string" @@ -20278,6 +26443,11 @@ const docTemplate = `{ "description": "Auth server URL.", "type": "string" }, + "clientCredentials": { + "description": "Use client credentials OAuth flow.", + "type": "boolean", + "default": false + }, "clientId": { "description": "OAuth Client Id.", "type": "string" @@ -20322,6 +26492,11 @@ const docTemplate = `{ "description": "Auth server URL.", "type": "string" }, + "clientCredentials": { + "description": "Use client credentials OAuth flow.", + "type": "boolean", + "default": false + }, "clientId": { "description": "OAuth Client Id.", "type": "string" @@ -20351,6 +26526,11 @@ const docTemplate = `{ "tokenUrl": { "description": "Token server url.", "type": "string" + }, + "uploadCutoff": { + "description": "Cutoff for switching to large file upload api (\u003e= 10 MiB).", + "type": "string", + "default": "10Mi" } } }, diff --git a/docs/swagger/swagger.json b/docs/swagger/swagger.json index fc9b5b664..2df55b2e6 100644 --- a/docs/swagger/swagger.json +++ b/docs/swagger/swagger.json @@ -3658,7 +3658,7 @@ } } }, - "/storage/s3/ceph": { + "/storage/s3/bizflycloud": { "post": { 
"consumes": [ "application/json" @@ -3669,8 +3669,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with Ceph - Ceph Object Storage", - "operationId": "CreateS3CephStorage", + "summary": "Create S3 storage with BizflyCloud - Bizfly Cloud Simple Storage", + "operationId": "CreateS3BizflyCloudStorage", "parameters": [ { "description": "Request body", @@ -3678,7 +3678,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3CephStorageRequest" + "$ref": "#/definitions/storage.createS3BizflyCloudStorageRequest" } } ], @@ -3704,7 +3704,7 @@ } } }, - "/storage/s3/chinamobile": { + "/storage/s3/ceph": { "post": { "consumes": [ "application/json" @@ -3715,8 +3715,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with ChinaMobile - China Mobile Ecloud Elastic Object Storage (EOS)", - "operationId": "CreateS3ChinaMobileStorage", + "summary": "Create S3 storage with Ceph - Ceph Object Storage", + "operationId": "CreateS3CephStorage", "parameters": [ { "description": "Request body", @@ -3724,7 +3724,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3ChinaMobileStorageRequest" + "$ref": "#/definitions/storage.createS3CephStorageRequest" } } ], @@ -3750,7 +3750,7 @@ } } }, - "/storage/s3/cloudflare": { + "/storage/s3/chinamobile": { "post": { "consumes": [ "application/json" @@ -3761,8 +3761,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with Cloudflare - Cloudflare R2 Storage", - "operationId": "CreateS3CloudflareStorage", + "summary": "Create S3 storage with ChinaMobile - China Mobile Ecloud Elastic Object Storage (EOS)", + "operationId": "CreateS3ChinaMobileStorage", "parameters": [ { "description": "Request body", @@ -3770,7 +3770,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3CloudflareStorageRequest" + "$ref": "#/definitions/storage.createS3ChinaMobileStorageRequest" } } ], @@ -3796,7 +3796,7 @@ } } }, - 
"/storage/s3/digitalocean": { + "/storage/s3/cloudflare": { "post": { "consumes": [ "application/json" @@ -3807,8 +3807,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with DigitalOcean - DigitalOcean Spaces", - "operationId": "CreateS3DigitalOceanStorage", + "summary": "Create S3 storage with Cloudflare - Cloudflare R2 Storage", + "operationId": "CreateS3CloudflareStorage", "parameters": [ { "description": "Request body", @@ -3816,7 +3816,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3DigitalOceanStorageRequest" + "$ref": "#/definitions/storage.createS3CloudflareStorageRequest" } } ], @@ -3842,7 +3842,7 @@ } } }, - "/storage/s3/dreamhost": { + "/storage/s3/cubbit": { "post": { "consumes": [ "application/json" @@ -3853,8 +3853,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with Dreamhost - Dreamhost DreamObjects", - "operationId": "CreateS3DreamhostStorage", + "summary": "Create S3 storage with Cubbit - Cubbit DS3 Object Storage", + "operationId": "CreateS3CubbitStorage", "parameters": [ { "description": "Request body", @@ -3862,7 +3862,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3DreamhostStorageRequest" + "$ref": "#/definitions/storage.createS3CubbitStorageRequest" } } ], @@ -3888,7 +3888,7 @@ } } }, - "/storage/s3/gcs": { + "/storage/s3/digitalocean": { "post": { "consumes": [ "application/json" @@ -3899,8 +3899,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with GCS - Google Cloud Storage", - "operationId": "CreateS3GCSStorage", + "summary": "Create S3 storage with DigitalOcean - DigitalOcean Spaces", + "operationId": "CreateS3DigitalOceanStorage", "parameters": [ { "description": "Request body", @@ -3908,7 +3908,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3GCSStorageRequest" + "$ref": "#/definitions/storage.createS3DigitalOceanStorageRequest" } } ], @@ -3934,7 +3934,7 @@ } } }, - 
"/storage/s3/huaweiobs": { + "/storage/s3/dreamhost": { "post": { "consumes": [ "application/json" @@ -3945,8 +3945,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with HuaweiOBS - Huawei Object Storage Service", - "operationId": "CreateS3HuaweiOBSStorage", + "summary": "Create S3 storage with Dreamhost - Dreamhost DreamObjects", + "operationId": "CreateS3DreamhostStorage", "parameters": [ { "description": "Request body", @@ -3954,7 +3954,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3HuaweiOBSStorageRequest" + "$ref": "#/definitions/storage.createS3DreamhostStorageRequest" } } ], @@ -3980,7 +3980,7 @@ } } }, - "/storage/s3/ibmcos": { + "/storage/s3/exaba": { "post": { "consumes": [ "application/json" @@ -3991,8 +3991,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with IBMCOS - IBM COS S3", - "operationId": "CreateS3IBMCOSStorage", + "summary": "Create S3 storage with Exaba - Exaba Object Storage", + "operationId": "CreateS3ExabaStorage", "parameters": [ { "description": "Request body", @@ -4000,7 +4000,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3IBMCOSStorageRequest" + "$ref": "#/definitions/storage.createS3ExabaStorageRequest" } } ], @@ -4026,7 +4026,7 @@ } } }, - "/storage/s3/idrive": { + "/storage/s3/filelu": { "post": { "consumes": [ "application/json" @@ -4037,8 +4037,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with IDrive - IDrive e2", - "operationId": "CreateS3IDriveStorage", + "summary": "Create S3 storage with FileLu - FileLu S5 (S3-Compatible Object Storage)", + "operationId": "CreateS3FileLuStorage", "parameters": [ { "description": "Request body", @@ -4046,7 +4046,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3IDriveStorageRequest" + "$ref": "#/definitions/storage.createS3FileLuStorageRequest" } } ], @@ -4072,7 +4072,7 @@ } } }, - "/storage/s3/ionos": { + "/storage/s3/flashblade": { 
"post": { "consumes": [ "application/json" @@ -4083,8 +4083,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with IONOS - IONOS Cloud", - "operationId": "CreateS3IONOSStorage", + "summary": "Create S3 storage with FlashBlade - Pure Storage FlashBlade Object Storage", + "operationId": "CreateS3FlashBladeStorage", "parameters": [ { "description": "Request body", @@ -4092,7 +4092,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3IONOSStorageRequest" + "$ref": "#/definitions/storage.createS3FlashBladeStorageRequest" } } ], @@ -4118,7 +4118,7 @@ } } }, - "/storage/s3/leviia": { + "/storage/s3/gcs": { "post": { "consumes": [ "application/json" @@ -4129,8 +4129,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with Leviia - Leviia Object Storage", - "operationId": "CreateS3LeviiaStorage", + "summary": "Create S3 storage with GCS - Google Cloud Storage", + "operationId": "CreateS3GCSStorage", "parameters": [ { "description": "Request body", @@ -4138,7 +4138,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3LeviiaStorageRequest" + "$ref": "#/definitions/storage.createS3GCSStorageRequest" } } ], @@ -4164,7 +4164,7 @@ } } }, - "/storage/s3/liara": { + "/storage/s3/hetzner": { "post": { "consumes": [ "application/json" @@ -4175,8 +4175,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with Liara - Liara Object Storage", - "operationId": "CreateS3LiaraStorage", + "summary": "Create S3 storage with Hetzner - Hetzner Object Storage", + "operationId": "CreateS3HetznerStorage", "parameters": [ { "description": "Request body", @@ -4184,7 +4184,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3LiaraStorageRequest" + "$ref": "#/definitions/storage.createS3HetznerStorageRequest" } } ], @@ -4210,7 +4210,7 @@ } } }, - "/storage/s3/linode": { + "/storage/s3/huaweiobs": { "post": { "consumes": [ "application/json" @@ -4221,8 +4221,8 @@ 
"tags": [ "Storage" ], - "summary": "Create S3 storage with Linode - Linode Object Storage", - "operationId": "CreateS3LinodeStorage", + "summary": "Create S3 storage with HuaweiOBS - Huawei Object Storage Service", + "operationId": "CreateS3HuaweiOBSStorage", "parameters": [ { "description": "Request body", @@ -4230,7 +4230,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3LinodeStorageRequest" + "$ref": "#/definitions/storage.createS3HuaweiOBSStorageRequest" } } ], @@ -4256,7 +4256,7 @@ } } }, - "/storage/s3/lyvecloud": { + "/storage/s3/ibmcos": { "post": { "consumes": [ "application/json" @@ -4267,8 +4267,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with LyveCloud - Seagate Lyve Cloud", - "operationId": "CreateS3LyveCloudStorage", + "summary": "Create S3 storage with IBMCOS - IBM COS S3", + "operationId": "CreateS3IBMCOSStorage", "parameters": [ { "description": "Request body", @@ -4276,7 +4276,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3LyveCloudStorageRequest" + "$ref": "#/definitions/storage.createS3IBMCOSStorageRequest" } } ], @@ -4302,7 +4302,7 @@ } } }, - "/storage/s3/magalu": { + "/storage/s3/idrive": { "post": { "consumes": [ "application/json" @@ -4313,8 +4313,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with Magalu - Magalu Object Storage", - "operationId": "CreateS3MagaluStorage", + "summary": "Create S3 storage with IDrive - IDrive e2", + "operationId": "CreateS3IDriveStorage", "parameters": [ { "description": "Request body", @@ -4322,7 +4322,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3MagaluStorageRequest" + "$ref": "#/definitions/storage.createS3IDriveStorageRequest" } } ], @@ -4348,7 +4348,7 @@ } } }, - "/storage/s3/minio": { + "/storage/s3/intercolo": { "post": { "consumes": [ "application/json" @@ -4359,8 +4359,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with Minio - 
Minio Object Storage", - "operationId": "CreateS3MinioStorage", + "summary": "Create S3 storage with Intercolo - Intercolo Object Storage", + "operationId": "CreateS3IntercoloStorage", "parameters": [ { "description": "Request body", @@ -4368,7 +4368,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3MinioStorageRequest" + "$ref": "#/definitions/storage.createS3IntercoloStorageRequest" } } ], @@ -4394,7 +4394,7 @@ } } }, - "/storage/s3/netease": { + "/storage/s3/ionos": { "post": { "consumes": [ "application/json" @@ -4405,8 +4405,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with Netease - Netease Object Storage (NOS)", - "operationId": "CreateS3NeteaseStorage", + "summary": "Create S3 storage with IONOS - IONOS Cloud", + "operationId": "CreateS3IONOSStorage", "parameters": [ { "description": "Request body", @@ -4414,7 +4414,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3NeteaseStorageRequest" + "$ref": "#/definitions/storage.createS3IONOSStorageRequest" } } ], @@ -4440,7 +4440,7 @@ } } }, - "/storage/s3/other": { + "/storage/s3/leviia": { "post": { "consumes": [ "application/json" @@ -4451,8 +4451,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with Other - Any other S3 compatible provider", - "operationId": "CreateS3OtherStorage", + "summary": "Create S3 storage with Leviia - Leviia Object Storage", + "operationId": "CreateS3LeviiaStorage", "parameters": [ { "description": "Request body", @@ -4460,7 +4460,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3OtherStorageRequest" + "$ref": "#/definitions/storage.createS3LeviiaStorageRequest" } } ], @@ -4486,7 +4486,7 @@ } } }, - "/storage/s3/petabox": { + "/storage/s3/liara": { "post": { "consumes": [ "application/json" @@ -4497,8 +4497,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with Petabox - Petabox Object Storage", - "operationId": 
"CreateS3PetaboxStorage", + "summary": "Create S3 storage with Liara - Liara Object Storage", + "operationId": "CreateS3LiaraStorage", "parameters": [ { "description": "Request body", @@ -4506,7 +4506,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3PetaboxStorageRequest" + "$ref": "#/definitions/storage.createS3LiaraStorageRequest" } } ], @@ -4532,7 +4532,7 @@ } } }, - "/storage/s3/qiniu": { + "/storage/s3/linode": { "post": { "consumes": [ "application/json" @@ -4543,8 +4543,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with Qiniu - Qiniu Object Storage (Kodo)", - "operationId": "CreateS3QiniuStorage", + "summary": "Create S3 storage with Linode - Linode Object Storage", + "operationId": "CreateS3LinodeStorage", "parameters": [ { "description": "Request body", @@ -4552,7 +4552,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3QiniuStorageRequest" + "$ref": "#/definitions/storage.createS3LinodeStorageRequest" } } ], @@ -4578,7 +4578,7 @@ } } }, - "/storage/s3/rackcorp": { + "/storage/s3/lyvecloud": { "post": { "consumes": [ "application/json" @@ -4589,8 +4589,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with RackCorp - RackCorp Object Storage", - "operationId": "CreateS3RackCorpStorage", + "summary": "Create S3 storage with LyveCloud - Seagate Lyve Cloud", + "operationId": "CreateS3LyveCloudStorage", "parameters": [ { "description": "Request body", @@ -4598,7 +4598,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3RackCorpStorageRequest" + "$ref": "#/definitions/storage.createS3LyveCloudStorageRequest" } } ], @@ -4624,7 +4624,7 @@ } } }, - "/storage/s3/rclone": { + "/storage/s3/magalu": { "post": { "consumes": [ "application/json" @@ -4635,8 +4635,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with Rclone - Rclone S3 Server", - "operationId": "CreateS3RcloneStorage", + "summary": "Create S3 storage with 
Magalu - Magalu Object Storage", + "operationId": "CreateS3MagaluStorage", "parameters": [ { "description": "Request body", @@ -4644,7 +4644,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3RcloneStorageRequest" + "$ref": "#/definitions/storage.createS3MagaluStorageRequest" } } ], @@ -4670,7 +4670,7 @@ } } }, - "/storage/s3/scaleway": { + "/storage/s3/mega": { "post": { "consumes": [ "application/json" @@ -4681,8 +4681,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with Scaleway - Scaleway Object Storage", - "operationId": "CreateS3ScalewayStorage", + "summary": "Create S3 storage with Mega - MEGA S4 Object Storage", + "operationId": "CreateS3MegaStorage", "parameters": [ { "description": "Request body", @@ -4690,7 +4690,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3ScalewayStorageRequest" + "$ref": "#/definitions/storage.createS3MegaStorageRequest" } } ], @@ -4716,7 +4716,7 @@ } } }, - "/storage/s3/seaweedfs": { + "/storage/s3/minio": { "post": { "consumes": [ "application/json" @@ -4727,8 +4727,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with SeaweedFS - SeaweedFS S3", - "operationId": "CreateS3SeaweedFSStorage", + "summary": "Create S3 storage with Minio - Minio Object Storage", + "operationId": "CreateS3MinioStorage", "parameters": [ { "description": "Request body", @@ -4736,7 +4736,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3SeaweedFSStorageRequest" + "$ref": "#/definitions/storage.createS3MinioStorageRequest" } } ], @@ -4762,7 +4762,7 @@ } } }, - "/storage/s3/stackpath": { + "/storage/s3/netease": { "post": { "consumes": [ "application/json" @@ -4773,8 +4773,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with StackPath - StackPath Object Storage", - "operationId": "CreateS3StackPathStorage", + "summary": "Create S3 storage with Netease - Netease Object Storage (NOS)", + "operationId": 
"CreateS3NeteaseStorage", "parameters": [ { "description": "Request body", @@ -4782,7 +4782,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3StackPathStorageRequest" + "$ref": "#/definitions/storage.createS3NeteaseStorageRequest" } } ], @@ -4808,7 +4808,7 @@ } } }, - "/storage/s3/storj": { + "/storage/s3/other": { "post": { "consumes": [ "application/json" @@ -4819,8 +4819,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with Storj - Storj (S3 Compatible Gateway)", - "operationId": "CreateS3StorjStorage", + "summary": "Create S3 storage with Other - Any other S3 compatible provider", + "operationId": "CreateS3OtherStorage", "parameters": [ { "description": "Request body", @@ -4828,7 +4828,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3StorjStorageRequest" + "$ref": "#/definitions/storage.createS3OtherStorageRequest" } } ], @@ -4854,7 +4854,7 @@ } } }, - "/storage/s3/synology": { + "/storage/s3/outscale": { "post": { "consumes": [ "application/json" @@ -4865,8 +4865,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with Synology - Synology C2 Object Storage", - "operationId": "CreateS3SynologyStorage", + "summary": "Create S3 storage with Outscale - OUTSCALE Object Storage (OOS)", + "operationId": "CreateS3OutscaleStorage", "parameters": [ { "description": "Request body", @@ -4874,7 +4874,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3SynologyStorageRequest" + "$ref": "#/definitions/storage.createS3OutscaleStorageRequest" } } ], @@ -4900,7 +4900,7 @@ } } }, - "/storage/s3/tencentcos": { + "/storage/s3/ovhcloud": { "post": { "consumes": [ "application/json" @@ -4911,8 +4911,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with TencentCOS - Tencent Cloud Object Storage (COS)", - "operationId": "CreateS3TencentCOSStorage", + "summary": "Create S3 storage with OVHcloud - OVHcloud Object Storage", + "operationId": 
"CreateS3OVHcloudStorage", "parameters": [ { "description": "Request body", @@ -4920,7 +4920,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3TencentCOSStorageRequest" + "$ref": "#/definitions/storage.createS3OVHcloudStorageRequest" } } ], @@ -4946,7 +4946,7 @@ } } }, - "/storage/s3/wasabi": { + "/storage/s3/petabox": { "post": { "consumes": [ "application/json" @@ -4957,8 +4957,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with Wasabi - Wasabi Object Storage", - "operationId": "CreateS3WasabiStorage", + "summary": "Create S3 storage with Petabox - Petabox Object Storage", + "operationId": "CreateS3PetaboxStorage", "parameters": [ { "description": "Request body", @@ -4966,7 +4966,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3WasabiStorageRequest" + "$ref": "#/definitions/storage.createS3PetaboxStorageRequest" } } ], @@ -4992,7 +4992,7 @@ } } }, - "/storage/seafile": { + "/storage/s3/qiniu": { "post": { "consumes": [ "application/json" @@ -5003,8 +5003,8 @@ "tags": [ "Storage" ], - "summary": "Create Seafile storage", - "operationId": "CreateSeafileStorage", + "summary": "Create S3 storage with Qiniu - Qiniu Object Storage (Kodo)", + "operationId": "CreateS3QiniuStorage", "parameters": [ { "description": "Request body", @@ -5012,7 +5012,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createSeafileStorageRequest" + "$ref": "#/definitions/storage.createS3QiniuStorageRequest" } } ], @@ -5038,7 +5038,7 @@ } } }, - "/storage/sftp": { + "/storage/s3/rabata": { "post": { "consumes": [ "application/json" @@ -5049,8 +5049,8 @@ "tags": [ "Storage" ], - "summary": "Create Sftp storage", - "operationId": "CreateSftpStorage", + "summary": "Create S3 storage with Rabata - Rabata Cloud Storage", + "operationId": "CreateS3RabataStorage", "parameters": [ { "description": "Request body", @@ -5058,7 +5058,7 @@ "in": "body", "required": true, "schema": { 
- "$ref": "#/definitions/storage.createSftpStorageRequest" + "$ref": "#/definitions/storage.createS3RabataStorageRequest" } } ], @@ -5084,7 +5084,7 @@ } } }, - "/storage/sharefile": { + "/storage/s3/rackcorp": { "post": { "consumes": [ "application/json" @@ -5095,8 +5095,8 @@ "tags": [ "Storage" ], - "summary": "Create Sharefile storage", - "operationId": "CreateSharefileStorage", + "summary": "Create S3 storage with RackCorp - RackCorp Object Storage", + "operationId": "CreateS3RackCorpStorage", "parameters": [ { "description": "Request body", @@ -5104,7 +5104,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createSharefileStorageRequest" + "$ref": "#/definitions/storage.createS3RackCorpStorageRequest" } } ], @@ -5130,7 +5130,7 @@ } } }, - "/storage/sia": { + "/storage/s3/rclone": { "post": { "consumes": [ "application/json" @@ -5141,8 +5141,8 @@ "tags": [ "Storage" ], - "summary": "Create Sia storage", - "operationId": "CreateSiaStorage", + "summary": "Create S3 storage with Rclone - Rclone S3 Server", + "operationId": "CreateS3RcloneStorage", "parameters": [ { "description": "Request body", @@ -5150,7 +5150,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createSiaStorageRequest" + "$ref": "#/definitions/storage.createS3RcloneStorageRequest" } } ], @@ -5176,7 +5176,7 @@ } } }, - "/storage/smb": { + "/storage/s3/scaleway": { "post": { "consumes": [ "application/json" @@ -5187,8 +5187,8 @@ "tags": [ "Storage" ], - "summary": "Create Smb storage", - "operationId": "CreateSmbStorage", + "summary": "Create S3 storage with Scaleway - Scaleway Object Storage", + "operationId": "CreateS3ScalewayStorage", "parameters": [ { "description": "Request body", @@ -5196,7 +5196,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createSmbStorageRequest" + "$ref": "#/definitions/storage.createS3ScalewayStorageRequest" } } ], @@ -5222,7 +5222,7 @@ } } }, - "/storage/storj/existing": { + 
"/storage/s3/seaweedfs": { "post": { "consumes": [ "application/json" @@ -5233,8 +5233,8 @@ "tags": [ "Storage" ], - "summary": "Create Storj storage with existing - Use an existing access grant.", - "operationId": "CreateStorjExistingStorage", + "summary": "Create S3 storage with SeaweedFS - SeaweedFS S3", + "operationId": "CreateS3SeaweedFSStorage", "parameters": [ { "description": "Request body", @@ -5242,7 +5242,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createStorjExistingStorageRequest" + "$ref": "#/definitions/storage.createS3SeaweedFSStorageRequest" } } ], @@ -5268,7 +5268,7 @@ } } }, - "/storage/storj/new": { + "/storage/s3/selectel": { "post": { "consumes": [ "application/json" @@ -5279,8 +5279,8 @@ "tags": [ "Storage" ], - "summary": "Create Storj storage with new - Create a new access grant from satellite address, API key, and passphrase.", - "operationId": "CreateStorjNewStorage", + "summary": "Create S3 storage with Selectel - Selectel Object Storage", + "operationId": "CreateS3SelectelStorage", "parameters": [ { "description": "Request body", @@ -5288,7 +5288,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createStorjNewStorageRequest" + "$ref": "#/definitions/storage.createS3SelectelStorageRequest" } } ], @@ -5314,7 +5314,7 @@ } } }, - "/storage/sugarsync": { + "/storage/s3/servercore": { "post": { "consumes": [ "application/json" @@ -5325,8 +5325,8 @@ "tags": [ "Storage" ], - "summary": "Create Sugarsync storage", - "operationId": "CreateSugarsyncStorage", + "summary": "Create S3 storage with Servercore - Servercore Object Storage", + "operationId": "CreateS3ServercoreStorage", "parameters": [ { "description": "Request body", @@ -5334,7 +5334,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createSugarsyncStorageRequest" + "$ref": "#/definitions/storage.createS3ServercoreStorageRequest" } } ], @@ -5360,7 +5360,7 @@ } } }, - "/storage/swift": { 
+ "/storage/s3/spectralogic": { "post": { "consumes": [ "application/json" @@ -5371,8 +5371,8 @@ "tags": [ "Storage" ], - "summary": "Create Swift storage", - "operationId": "CreateSwiftStorage", + "summary": "Create S3 storage with SpectraLogic - Spectra Logic Black Pearl", + "operationId": "CreateS3SpectraLogicStorage", "parameters": [ { "description": "Request body", @@ -5380,7 +5380,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createSwiftStorageRequest" + "$ref": "#/definitions/storage.createS3SpectraLogicStorageRequest" } } ], @@ -5406,7 +5406,7 @@ } } }, - "/storage/union": { + "/storage/s3/stackpath": { "post": { "consumes": [ "application/json" @@ -5417,8 +5417,8 @@ "tags": [ "Storage" ], - "summary": "Create Union storage", - "operationId": "CreateUnionStorage", + "summary": "Create S3 storage with StackPath - StackPath Object Storage", + "operationId": "CreateS3StackPathStorage", "parameters": [ { "description": "Request body", @@ -5426,7 +5426,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createUnionStorageRequest" + "$ref": "#/definitions/storage.createS3StackPathStorageRequest" } } ], @@ -5452,7 +5452,7 @@ } } }, - "/storage/uptobox": { + "/storage/s3/storj": { "post": { "consumes": [ "application/json" @@ -5463,8 +5463,8 @@ "tags": [ "Storage" ], - "summary": "Create Uptobox storage", - "operationId": "CreateUptoboxStorage", + "summary": "Create S3 storage with Storj - Storj (S3 Compatible Gateway)", + "operationId": "CreateS3StorjStorage", "parameters": [ { "description": "Request body", @@ -5472,7 +5472,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createUptoboxStorageRequest" + "$ref": "#/definitions/storage.createS3StorjStorageRequest" } } ], @@ -5498,7 +5498,7 @@ } } }, - "/storage/webdav": { + "/storage/s3/synology": { "post": { "consumes": [ "application/json" @@ -5509,8 +5509,8 @@ "tags": [ "Storage" ], - "summary": "Create Webdav 
storage", - "operationId": "CreateWebdavStorage", + "summary": "Create S3 storage with Synology - Synology C2 Object Storage", + "operationId": "CreateS3SynologyStorage", "parameters": [ { "description": "Request body", @@ -5518,7 +5518,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createWebdavStorageRequest" + "$ref": "#/definitions/storage.createS3SynologyStorageRequest" } } ], @@ -5544,7 +5544,7 @@ } } }, - "/storage/yandex": { + "/storage/s3/tencentcos": { "post": { "consumes": [ "application/json" @@ -5555,8 +5555,8 @@ "tags": [ "Storage" ], - "summary": "Create Yandex storage", - "operationId": "CreateYandexStorage", + "summary": "Create S3 storage with TencentCOS - Tencent Cloud Object Storage (COS)", + "operationId": "CreateS3TencentCOSStorage", "parameters": [ { "description": "Request body", @@ -5564,7 +5564,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createYandexStorageRequest" + "$ref": "#/definitions/storage.createS3TencentCOSStorageRequest" } } ], @@ -5590,7 +5590,7 @@ } } }, - "/storage/zoho": { + "/storage/s3/wasabi": { "post": { "consumes": [ "application/json" @@ -5601,8 +5601,8 @@ "tags": [ "Storage" ], - "summary": "Create Zoho storage", - "operationId": "CreateZohoStorage", + "summary": "Create S3 storage with Wasabi - Wasabi Object Storage", + "operationId": "CreateS3WasabiStorage", "parameters": [ { "description": "Request body", @@ -5610,7 +5610,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createZohoStorageRequest" + "$ref": "#/definitions/storage.createS3WasabiStorageRequest" } } ], @@ -5636,25 +5636,36 @@ } } }, - "/storage/{name}": { - "delete": { + "/storage/s3/zata": { + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], "tags": [ "Storage" ], - "summary": "Remove a storage", - "operationId": "RemoveStorage", + "summary": "Create S3 storage with Zata - Zata (S3 compatible Gateway)", + 
"operationId": "CreateS3ZataStorage", "parameters": [ { - "type": "string", - "description": "Storage ID or name", - "name": "name", - "in": "path", - "required": true + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.createS3ZataStorageRequest" + } } ], "responses": { - "204": { - "description": "No Content" + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Storage" + } }, "400": { "description": "Bad Request", @@ -5669,8 +5680,10 @@ } } } - }, - "patch": { + } + }, + "/storage/seafile": { + "post": { "consumes": [ "application/json" ], @@ -5680,26 +5693,16 @@ "tags": [ "Storage" ], - "summary": "Update a storage connection", - "operationId": "UpdateStorage", + "summary": "Create Seafile storage", + "operationId": "CreateSeafileStorage", "parameters": [ { - "type": "string", - "description": "Storage ID or name", - "name": "name", - "in": "path", - "required": true - }, - { - "description": "Configuration", - "name": "config", + "description": "Request body", + "name": "request", "in": "body", "required": true, "schema": { - "type": "object", - "additionalProperties": { - "type": "string" - } + "$ref": "#/definitions/storage.createSeafileStorageRequest" } } ], @@ -5725,8 +5728,8 @@ } } }, - "/storage/{name}/explore/{path}": { - "get": { + "/storage/sftp": { + "post": { "consumes": [ "application/json" ], @@ -5736,32 +5739,24 @@ "tags": [ "Storage" ], - "summary": "Explore directory entries in a storage system", - "operationId": "ExploreStorage", + "summary": "Create Sftp storage", + "operationId": "CreateSftpStorage", "parameters": [ { - "type": "string", - "description": "Storage ID or name", - "name": "name", - "in": "path", - "required": true - }, - { - "type": "string", - "description": "Path in the storage system to explore", - "name": "path", - "in": "path", - "required": true + "description": "Request body", + "name": "request", + "in": 
"body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.createSftpStorageRequest" + } } ], "responses": { "200": { "description": "OK", "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/storage.DirEntry" - } + "$ref": "#/definitions/model.Storage" } }, "400": { @@ -5779,8 +5774,8 @@ } } }, - "/storage/{name}/rename": { - "patch": { + "/storage/sharefile": { + "post": { "consumes": [ "application/json" ], @@ -5790,23 +5785,62 @@ "tags": [ "Storage" ], - "summary": "Rename a storage connection", - "operationId": "RenameStorage", + "summary": "Create Sharefile storage", + "operationId": "CreateSharefileStorage", "parameters": [ { - "type": "string", - "description": "Storage ID or name", - "name": "name", - "in": "path", - "required": true + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.createSharefileStorageRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Storage" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + }, + "/storage/sia": { + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Storage" + ], + "summary": "Create Sia storage", + "operationId": "CreateSiaStorage", + "parameters": [ { - "description": "New storage name", + "description": "Request body", "name": "request", "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.RenameRequest" + "$ref": "#/definitions/storage.createSiaStorageRequest" } } ], @@ -5832,24 +5866,35 @@ } } }, - "/wallet": { - "get": { + "/storage/smb": { + "post": { + "consumes": [ + "application/json" + ], "produces": [ "application/json" ], "tags": [ - "Wallet" + "Storage" + ], 
+ "summary": "Create Smb storage", + "operationId": "CreateSmbStorage", + "parameters": [ + { + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.createSmbStorageRequest" + } + } ], - "summary": "List all imported wallets", - "operationId": "ListWallets", "responses": { "200": { "description": "OK", "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/model.Wallet" - } + "$ref": "#/definitions/model.Storage" } }, "400": { @@ -5865,7 +5910,9 @@ } } } - }, + } + }, + "/storage/storj/existing": { "post": { "consumes": [ "application/json" @@ -5874,10 +5921,10 @@ "application/json" ], "tags": [ - "Wallet" + "Storage" ], - "summary": "Import a private key", - "operationId": "ImportWallet", + "summary": "Create Storj storage with existing - Use an existing access grant.", + "operationId": "CreateStorjExistingStorage", "parameters": [ { "description": "Request body", @@ -5885,7 +5932,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/wallet.ImportKeystoreRequest" + "$ref": "#/definitions/storage.createStorjExistingStorageRequest" } } ], @@ -5893,7 +5940,7 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/model.Wallet" + "$ref": "#/definitions/model.Storage" } }, "400": { @@ -5911,25 +5958,36 @@ } } }, - "/wallet/{address}": { - "delete": { + "/storage/storj/new": { + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], "tags": [ - "Wallet" + "Storage" ], - "summary": "Remove a wallet", - "operationId": "RemoveWallet", + "summary": "Create Storj storage with new - Create a new access grant from satellite address, API key, and passphrase.", + "operationId": "CreateStorjNewStorage", "parameters": [ { - "type": "string", - "description": "Address", - "name": "address", - "in": "path", - "required": true + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + 
"schema": { + "$ref": "#/definitions/storage.createStorjNewStorageRequest" + } } ], "responses": { - "204": { - "description": "No Content" + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Storage" + } }, "400": { "description": "Bad Request", @@ -5945,1476 +6003,7478 @@ } } } - } - }, + }, + "/storage/sugarsync": { + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Storage" + ], + "summary": "Create Sugarsync storage", + "operationId": "CreateSugarsyncStorage", + "parameters": [ + { + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.createSugarsyncStorageRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Storage" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + }, + "/storage/swift": { + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Storage" + ], + "summary": "Create Swift storage", + "operationId": "CreateSwiftStorage", + "parameters": [ + { + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.createSwiftStorageRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Storage" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + }, + "/storage/union": { + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + 
"tags": [ + "Storage" + ], + "summary": "Create Union storage", + "operationId": "CreateUnionStorage", + "parameters": [ + { + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.createUnionStorageRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Storage" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + }, + "/storage/webdav": { + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Storage" + ], + "summary": "Create Webdav storage", + "operationId": "CreateWebdavStorage", + "parameters": [ + { + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.createWebdavStorageRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Storage" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + }, + "/storage/yandex": { + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Storage" + ], + "summary": "Create Yandex storage", + "operationId": "CreateYandexStorage", + "parameters": [ + { + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.createYandexStorageRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Storage" + } + }, + "400": { + "description": "Bad 
Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + }, + "/storage/zoho": { + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Storage" + ], + "summary": "Create Zoho storage", + "operationId": "CreateZohoStorage", + "parameters": [ + { + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.createZohoStorageRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Storage" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + }, + "/storage/{name}": { + "delete": { + "tags": [ + "Storage" + ], + "summary": "Remove a storage", + "operationId": "RemoveStorage", + "parameters": [ + { + "type": "string", + "description": "Storage ID or name", + "name": "name", + "in": "path", + "required": true + } + ], + "responses": { + "204": { + "description": "No Content" + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + }, + "patch": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Storage" + ], + "summary": "Update a storage connection", + "operationId": "UpdateStorage", + "parameters": [ + { + "type": "string", + "description": "Storage ID or name", + "name": "name", + "in": "path", + "required": true + }, + { + "description": "Configuration", + "name": "config", + "in": "body", + "required": true, + "schema": { + "type": 
"object", + "additionalProperties": { + "type": "string" + } + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Storage" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + }, + "/storage/{name}/explore/{path}": { + "get": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Storage" + ], + "summary": "Explore directory entries in a storage system", + "operationId": "ExploreStorage", + "parameters": [ + { + "type": "string", + "description": "Storage ID or name", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Path in the storage system to explore", + "name": "path", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/storage.DirEntry" + } + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + }, + "/storage/{name}/rename": { + "patch": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Storage" + ], + "summary": "Rename a storage connection", + "operationId": "RenameStorage", + "parameters": [ + { + "type": "string", + "description": "Storage ID or name", + "name": "name", + "in": "path", + "required": true + }, + { + "description": "New storage name", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.RenameRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": 
"#/definitions/model.Storage" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + }, + "/wallet": { + "get": { + "produces": [ + "application/json" + ], + "tags": [ + "Wallet" + ], + "summary": "List all imported wallets", + "operationId": "ListWallets", + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/model.Wallet" + } + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + }, + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Wallet" + ], + "summary": "Import a private key", + "operationId": "ImportWallet", + "parameters": [ + { + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/wallet.ImportKeystoreRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Wallet" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + }, + "/wallet/{address}": { + "delete": { + "tags": [ + "Wallet" + ], + "summary": "Remove a wallet", + "operationId": "RemoveWallet", + "parameters": [ + { + "type": "string", + "description": "Address", + "name": "address", + "in": "path", + "required": true + } + ], + "responses": { + "204": { + "description": "No Content" + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + 
"500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + } + }, "definitions": { "admin.SetIdentityRequest": { "type": "object", "properties": { - "identity": { + "identity": { + "type": "string" + } + } + }, + "api.HTTPError": { + "type": "object", + "properties": { + "err": { + "type": "string" + } + } + }, + "dataprep.AddPieceRequest": { + "type": "object", + "required": [ + "pieceCid" + ], + "properties": { + "fileSize": { + "description": "File size of the CAR file, this is required for boost online deal", + "type": "integer" + }, + "pieceCid": { + "description": "CID of the piece", + "type": "string" + }, + "pieceSize": { + "description": "Size of the piece (required for external import, optional if piece exists in DB)", + "type": "string" + }, + "rootCid": { + "description": "Root CID of the CAR file, used to populate the label field of storage deal", + "type": "string" + } + } + }, + "dataprep.CreateRequest": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "deleteAfterExport": { + "description": "Whether to delete the source files after export", + "type": "boolean", + "default": false + }, + "maxSize": { + "description": "Maximum size of the CAR files to be created", + "type": "string", + "default": "31.5GiB" + }, + "minPieceSize": { + "description": "Minimum piece size for the preparation, applies only to DAG and remainer pieces", + "type": "string", + "default": "1MiB" + }, + "name": { + "description": "Name of the preparation", + "type": "string" + }, + "noDag": { + "description": "Whether to disable maintaining folder dag structure for the sources. If disabled, DagGen will not be possible and folders will not have an associated CID.", + "type": "boolean", + "default": false + }, + "noInline": { + "description": "Whether to disable inline storage for the preparation. 
Can save database space but requires at least one output storage.", + "type": "boolean", + "default": false + }, + "outputStorages": { + "description": "Name of Output storage systems to be used for the output", + "type": "array", + "items": { + "type": "string" + } + }, + "pieceSize": { + "description": "Target piece size of the CAR files used for piece commitment calculation", + "type": "string" + }, + "sourceStorages": { + "description": "Name of Source storage systems to be used for the source", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "dataprep.DeletePieceRequest": { + "type": "object", + "properties": { + "deleteCar": { + "description": "Delete the physical CAR file from storage (default: true)", + "type": "boolean" + }, + "force": { + "description": "Delete even if deals reference this piece", + "type": "boolean" + } + } + }, + "dataprep.DirEntry": { + "type": "object", + "properties": { + "cid": { + "type": "string" + }, + "fileVersions": { + "type": "array", + "items": { + "$ref": "#/definitions/dataprep.Version" + } + }, + "isDir": { + "type": "boolean" + }, + "path": { + "type": "string" + } + } + }, + "dataprep.ExploreResult": { + "type": "object", + "properties": { + "cid": { + "type": "string" + }, + "path": { + "type": "string" + }, + "subEntries": { + "type": "array", + "items": { + "$ref": "#/definitions/dataprep.DirEntry" + } + } + } + }, + "dataprep.PieceList": { + "type": "object", + "properties": { + "attachmentId": { + "type": "integer" + }, + "pieces": { + "type": "array", + "items": { + "$ref": "#/definitions/model.Car" + } + }, + "source": { + "$ref": "#/definitions/model.Storage" + }, + "storageId": { + "type": "integer" + } + } + }, + "dataprep.RemoveRequest": { + "type": "object", + "properties": { + "removeCars": { + "type": "boolean" + } + } + }, + "dataprep.RenameRequest": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + } + } + }, + "dataprep.Version": 
{ + "type": "object", + "properties": { + "cid": { + "type": "string" + }, + "hash": { + "type": "string" + }, + "id": { + "type": "integer" + }, + "lastModified": { + "type": "string" + }, + "size": { + "type": "integer" + } + } + }, + "deal.ListDealRequest": { + "type": "object", + "properties": { + "dealTypes": { + "description": "deal type filter (market for f05, pdp for f41)", + "type": "array", + "items": { + "$ref": "#/definitions/model.DealType" + } + }, + "preparations": { + "description": "preparation ID or name filter", + "type": "array", + "items": { + "type": "string" + } + }, + "providers": { + "description": "provider filter", + "type": "array", + "items": { + "type": "string" + } + }, + "schedules": { + "description": "schedule id filter", + "type": "array", + "items": { + "type": "integer" + } + }, + "sources": { + "description": "source ID or name filter", + "type": "array", + "items": { + "type": "string" + } + }, + "states": { + "description": "state filter", + "type": "array", + "items": { + "$ref": "#/definitions/model.DealState" + } + } + } + }, + "deal.Proposal": { + "type": "object", + "properties": { + "clientAddress": { + "description": "Client address", + "type": "string" + }, + "duration": { + "description": "Duration in epoch or in duration format, i.e. 1500000, 2400h", + "type": "string", + "default": "12740h" + }, + "fileSize": { + "description": "File size in bytes for boost to fetch the CAR file", + "type": "integer" + }, + "httpHeaders": { + "description": "http headers to be passed with the request (i.e. 
key=value)", + "type": "array", + "items": { + "type": "string" + } + }, + "ipni": { + "description": "Whether the deal should be IPNI", + "type": "boolean", + "default": true + }, + "keepUnsealed": { + "description": "Whether the deal should be kept unsealed", + "type": "boolean", + "default": true + }, + "pieceCid": { + "description": "Piece CID", + "type": "string" + }, + "pieceSize": { + "description": "Piece size", + "type": "string" + }, + "pricePerDeal": { + "description": "Price in FIL per deal", + "type": "number", + "default": 0 + }, + "pricePerGb": { + "description": "Price in FIL per GiB", + "type": "number", + "default": 0 + }, + "pricePerGbEpoch": { + "description": "Price in FIL per GiB per epoch", + "type": "number", + "default": 0 + }, + "providerId": { + "description": "Provider ID", + "type": "string" + }, + "rootCid": { + "description": "Root CID that is required as part of the deal proposal, if empty, will be set to empty CID", + "type": "string", + "default": "bafkqaaa" + }, + "startDelay": { + "description": "Deal start delay in epoch or in duration format, i.e. 1000, 72h", + "type": "string", + "default": "72h" + }, + "urlTemplate": { + "description": "URL template with PIECE_CID placeholder for boost to fetch the CAR file, i.e. 
http://127.0.0.1/piece/{PIECE_CID}.car", + "type": "string" + }, + "verified": { + "description": "Whether the deal should be verified", + "type": "boolean", + "default": true + } + } + }, + "file.DealsForFileRange": { + "type": "object", + "properties": { + "deals": { + "type": "array", + "items": { + "$ref": "#/definitions/model.Deal" + } + }, + "fileRange": { + "$ref": "#/definitions/model.FileRange" + } + } + }, + "file.Info": { + "type": "object", + "properties": { + "path": { + "description": "Path to the new file, relative to the source", + "type": "string" + } + } + }, + "job.SourceStatus": { + "type": "object", + "properties": { + "attachmentId": { + "type": "integer" + }, + "jobs": { + "type": "array", + "items": { + "$ref": "#/definitions/model.Job" + } + }, + "output": { + "type": "array", + "items": { + "$ref": "#/definitions/model.Storage" + } + }, + "source": { + "$ref": "#/definitions/model.Storage" + }, + "storageId": { + "type": "integer" + } + } + }, + "model.Car": { + "type": "object", + "properties": { + "attachmentId": { + "type": "integer" + }, + "createdAt": { + "type": "string" + }, + "fileSize": { + "type": "integer" + }, + "id": { + "type": "integer" + }, + "jobId": { + "type": "integer" + }, + "minPieceSizePadding": { + "description": "MinPieceSizePadding tracks virtual padding for inline mode only. Inline: stores padding amount, PieceReader serves zeros virtually. 
Non-inline: always 0, literal zeros are written to CAR file for Curio TreeD compatibility.", + "type": "integer" + }, + "numOfFiles": { + "type": "integer" + }, + "pieceCid": { + "type": "string" + }, + "pieceSize": { + "type": "integer" + }, + "pieceType": { + "description": "PieceType indicates whether this is a data piece or DAG piece", + "type": "string" + }, + "preparationId": { + "description": "Association - SET NULL for fast prep deletion, async cleanup", + "type": "integer" + }, + "rootCid": { + "type": "string" + }, + "storageId": { + "type": "integer" + }, + "storagePath": { + "description": "StoragePath is the path to the CAR file inside the storage. If the StorageID is nil but StoragePath is not empty, it means the CAR file is stored at the local absolute path.", + "type": "string" + } + } + }, + "model.ClientConfig": { + "type": "object", + "properties": { + "caCert": { + "description": "Paths to CA certificate used to verify servers", + "type": "array", + "items": { + "type": "string" + } + }, + "clientCert": { + "description": "Path to Client SSL certificate (PEM) for mutual TLS auth", + "type": "string" + }, + "clientKey": { + "description": "Path to Client SSL private key (PEM) for mutual TLS auth", + "type": "string" + }, + "connectTimeout": { + "description": "HTTP Client Connect timeout", + "type": "integer" + }, + "disableHttp2": { + "description": "Disable HTTP/2 in the transport", + "type": "boolean" + }, + "disableHttpKeepAlives": { + "description": "Disable HTTP keep-alives and use each connection once.", + "type": "boolean" + }, + "expectContinueTimeout": { + "description": "Timeout when using expect / 100-continue in HTTP", + "type": "integer" + }, + "headers": { + "description": "Set HTTP header for all transactions", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "insecureSkipVerify": { + "description": "Do not verify the server SSL certificate (insecure)", + "type": "boolean" + }, + "lowlevelRetries": { 
+ "description": "Maximum number of retries for low-level client errors. Default is 10 retries.", + "type": "integer" + }, + "noGzip": { + "description": "Don't set Accept-Encoding: gzip", + "type": "boolean" + }, + "retryBackoff": { + "description": "Constant backoff between retries. Default is 1s.", + "type": "integer" + }, + "retryBackoffExponential": { + "description": "Exponential backoff between retries. Default is 1.0.", + "type": "number" + }, + "retryDelay": { + "description": "Delay between retries. Default is 1s.", + "type": "integer" + }, + "retryMaxCount": { + "description": "Maximum number of retries. Default is 10 retries.", + "type": "integer" + }, + "scanConcurrency": { + "description": "Maximum number of concurrent scan requests. Default is 1.", + "type": "integer" + }, + "skipInaccessibleFile": { + "description": "Skip inaccessible files. Default is false.", + "type": "boolean" + }, + "timeout": { + "description": "IO idle timeout", + "type": "integer" + }, + "useServerModTime": { + "description": "Use server modified time instead of object metadata", + "type": "boolean" + }, + "userAgent": { + "description": "Set the user-agent to a specified string", + "type": "string" + } + } + }, + "model.ConfigMap": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "model.Deal": { + "type": "object", + "properties": { + "clientId": { + "type": "string" + }, + "createdAt": { + "type": "string" + }, + "dealId": { + "type": "integer" + }, + "dealType": { + "$ref": "#/definitions/model.DealType" + }, + "endEpoch": { + "type": "integer" + }, + "errorMessage": { + "type": "string" + }, + "id": { + "type": "integer" + }, + "label": { + "type": "string" + }, + "lastVerifiedAt": { + "description": "LastVerifiedAt is the last time the deal was verified as active by the tracker", + "type": "string" + }, + "nextChallengeEpoch": { + "description": "NextChallengeEpoch is the next epoch when a challenge proof is due", + "type": "integer" + }, 
+ "pieceCid": { + "type": "string" + }, + "pieceSize": { + "type": "integer" + }, + "price": { + "type": "string" + }, + "proofSetId": { + "description": "PDP-specific fields (only populated for DealTypePDP)", + "type": "integer" + }, + "proofSetLive": { + "description": "ProofSetLive indicates if the proof set is live (actively being challenged)", + "type": "boolean" + }, + "proposalId": { + "type": "string" + }, + "provider": { + "type": "string" + }, + "scheduleId": { + "description": "Associations", + "type": "integer" + }, + "sectorStartEpoch": { + "type": "integer" + }, + "startEpoch": { + "type": "integer" + }, + "state": { + "$ref": "#/definitions/model.DealState" + }, + "updatedAt": { + "type": "string" + }, + "verified": { + "type": "boolean" + }, + "walletId": { + "type": "integer" + } + } + }, + "model.DealState": { + "type": "string", + "enum": [ + "proposed", + "published", + "active", + "expired", + "proposal_expired", + "rejected", + "slashed", + "error" + ], + "x-enum-varnames": [ + "DealProposed", + "DealPublished", + "DealActive", + "DealExpired", + "DealProposalExpired", + "DealRejected", + "DealSlashed", + "DealErrored" + ] + }, + "model.DealType": { + "type": "string", + "enum": [ + "market", + "pdp" + ], + "x-enum-varnames": [ + "DealTypeMarket", + "DealTypePDP" + ] + }, + "model.File": { + "type": "object", + "properties": { + "attachmentId": { + "description": "Associations - AttachmentID SET NULL for fast prep deletion, async cleanup", + "type": "integer" + }, + "cid": { + "description": "CID is the CID of the file.", + "type": "string" + }, + "directoryId": { + "type": "integer" + }, + "fileRanges": { + "type": "array", + "items": { + "$ref": "#/definitions/model.FileRange" + } + }, + "hash": { + "description": "Hash is the hash of the file.", + "type": "string" + }, + "id": { + "type": "integer" + }, + "lastModifiedNano": { + "type": "integer" + }, + "path": { + "description": "Path is the relative path to the file inside the storage.", 
+ "type": "string" + }, + "size": { + "description": "Size is the size of the file in bytes.", + "type": "integer" + } + } + }, + "model.FileRange": { + "type": "object", + "properties": { + "cid": { + "description": "CID is the CID of the range.", + "type": "string" + }, + "fileId": { + "type": "integer" + }, + "id": { + "type": "integer" + }, + "jobId": { + "description": "Associations", + "type": "integer" + }, + "length": { + "description": "Length is the length of the range in bytes.", + "type": "integer" + }, + "offset": { + "description": "Offset is the offset of the range inside the file.", + "type": "integer" + } + } + }, + "model.Job": { + "type": "object", + "properties": { + "attachmentId": { + "type": "integer" + }, + "errorMessage": { + "type": "string" + }, + "errorStackTrace": { + "type": "string" + }, + "id": { + "type": "integer" + }, + "state": { + "$ref": "#/definitions/model.JobState" + }, + "type": { + "$ref": "#/definitions/model.JobType" + }, + "workerId": { + "description": "Associations - AttachmentID SET NULL for fast prep deletion, async cleanup", + "type": "string" + } + } + }, + "model.JobState": { + "type": "string", + "enum": [ + "created", + "ready", + "paused", + "processing", + "complete", + "error" + ], + "x-enum-varnames": [ + "Created", + "Ready", + "Paused", + "Processing", + "Complete", + "Error" + ] + }, + "model.JobType": { + "type": "string", + "enum": [ + "scan", + "pack", + "daggen" + ], + "x-enum-varnames": [ + "Scan", + "Pack", + "DagGen" + ] + }, + "model.Preparation": { + "type": "object", + "properties": { + "createdAt": { + "type": "string" + }, + "deleteAfterExport": { + "description": "DeleteAfterExport is a flag that indicates whether the source files should be deleted after export.", + "type": "boolean" + }, + "id": { + "type": "integer" + }, + "maxSize": { + "type": "integer" + }, + "minPieceSize": { + "description": "Minimum piece size for the preparation, applies only to DAG and remainder pieces", + "type": 
"integer" + }, + "name": { + "type": "string" + }, + "noDag": { + "type": "boolean" + }, + "noInline": { + "type": "boolean" + }, + "outputStorages": { + "type": "array", + "items": { + "$ref": "#/definitions/model.Storage" + } + }, + "pieceSize": { + "type": "integer" + }, + "sourceStorages": { + "type": "array", + "items": { + "$ref": "#/definitions/model.Storage" + } + }, + "updatedAt": { + "type": "string" + }, + "walletId": { + "description": "Associations", + "type": "integer" + } + } + }, + "model.Schedule": { + "type": "object", + "properties": { + "allowedPieceCids": { + "type": "array", + "items": { + "type": "string" + } + }, + "announceToIpni": { + "type": "boolean" + }, + "createdAt": { + "type": "string" + }, + "dealType": { + "$ref": "#/definitions/model.DealType" + }, + "duration": { + "type": "integer" + }, + "errorMessage": { + "type": "string" + }, + "force": { + "type": "boolean" + }, + "httpHeaders": { + "$ref": "#/definitions/model.ConfigMap" + }, + "id": { + "type": "integer" + }, + "keepUnsealed": { + "type": "boolean" + }, + "maxPendingDealNumber": { + "type": "integer" + }, + "maxPendingDealSize": { + "type": "integer" + }, + "notes": { + "type": "string" + }, + "preparationId": { + "description": "Associations", + "type": "integer" + }, + "pricePerDeal": { + "type": "number" + }, + "pricePerGb": { + "type": "number" + }, + "pricePerGbEpoch": { + "type": "number" + }, + "provider": { + "type": "string" + }, + "scheduleCron": { + "type": "string" + }, + "scheduleCronPerpetual": { + "type": "boolean" + }, + "scheduleDealNumber": { + "type": "integer" + }, + "scheduleDealSize": { + "type": "integer" + }, + "startDelay": { + "type": "integer" + }, + "state": { + "$ref": "#/definitions/model.ScheduleState" + }, + "totalDealNumber": { + "type": "integer" + }, + "totalDealSize": { + "type": "integer" + }, + "updatedAt": { + "type": "string" + }, + "urlTemplate": { + "type": "string" + }, + "verified": { + "type": "boolean" + } + } + }, + 
"model.ScheduleState": { + "type": "string", + "enum": [ + "active", + "paused", + "error", + "completed" + ], + "x-enum-varnames": [ + "ScheduleActive", + "SchedulePaused", + "ScheduleError", + "ScheduleCompleted" + ] + }, + "model.Storage": { + "type": "object", + "properties": { + "clientConfig": { + "description": "ClientConfig is the HTTP configuration for the storage, if applicable.", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "Config is a map of key-value pairs that can be used to store RClone options.", + "allOf": [ + { + "$ref": "#/definitions/model.ConfigMap" + } + ] + }, + "createdAt": { + "type": "string" + }, + "id": { + "type": "integer" + }, + "name": { + "type": "string" + }, + "path": { + "description": "Path is the path to the storage root.", + "type": "string" + }, + "preparationsAsOutput": { + "type": "array", + "items": { + "$ref": "#/definitions/model.Preparation" + } + }, + "preparationsAsSource": { + "description": "Associations", + "type": "array", + "items": { + "$ref": "#/definitions/model.Preparation" + } + }, + "type": { + "type": "string" + }, + "updatedAt": { + "type": "string" + } + } + }, + "model.Wallet": { + "type": "object", + "properties": { + "actorId": { + "description": "nullable, links to on-chain actor f0...", + "type": "string" + }, + "address": { + "description": "filecoin address (f1.../f3...)", + "type": "string" + }, + "id": { + "type": "integer" + }, + "keyPath": { + "description": "absolute path to key file", + "type": "string" + }, + "keyStore": { + "description": "local, yubikey, aws-kms, etc", + "type": "string" + }, + "name": { + "description": "optional label", + "type": "string" + } + } + }, + "schedule.CreateRequest": { + "type": "object", + "properties": { + "allowedPieceCids": { + "description": "Allowed piece CIDs in this schedule", + "type": "array", + "items": { + "type": "string" + } + }, + "dealType": { + "description": "Deal type: market 
(f05) or pdp (f41)", + "type": "string" + }, + "duration": { + "description": "Duration in epoch or in duration format, i.e. 1500000, 2400h", + "type": "string", + "default": "12840h" + }, + "force": { + "description": "Force to send out deals regardless of replication restriction", + "type": "boolean" + }, + "httpHeaders": { + "description": "http headers to be passed with the request (i.e. key=value)", + "type": "array", + "items": { + "type": "string" + } + }, + "ipni": { + "description": "Whether the deal should be IPNI", + "type": "boolean", + "default": true + }, + "keepUnsealed": { + "description": "Whether the deal should be kept unsealed", + "type": "boolean", + "default": true + }, + "maxPendingDealNumber": { + "description": "Max pending deal number", + "type": "integer" + }, + "maxPendingDealSize": { + "description": "Max pending deal size in human readable format, i.e. 100 TiB", + "type": "string" + }, + "notes": { + "description": "Notes", + "type": "string" + }, + "preparation": { + "description": "Preparation ID or name", + "type": "string" + }, + "pricePerDeal": { + "description": "Price in FIL per deal", + "type": "number", + "default": 0 + }, + "pricePerGb": { + "description": "Price in FIL per GiB", + "type": "number", + "default": 0 + }, + "pricePerGbEpoch": { + "description": "Price in FIL per GiB per epoch", + "type": "number", + "default": 0 + }, + "provider": { + "description": "Provider", + "type": "string" + }, + "scheduleCron": { + "description": "Schedule cron pattern", + "type": "string" + }, + "scheduleCronPerpetual": { + "description": "Whether a cron schedule should run indefinitely", + "type": "boolean" + }, + "scheduleDealNumber": { + "description": "Number of deals per scheduled time", + "type": "integer" + }, + "scheduleDealSize": { + "description": "Size of deals per schedule trigger in human readable format, i.e. 
100 TiB", + "type": "string" + }, + "startDelay": { + "description": "Deal start delay in epoch or in duration format, i.e. 1000, 72h", + "type": "string", + "default": "72h" + }, + "totalDealNumber": { + "description": "Total number of deals", + "type": "integer" + }, + "totalDealSize": { + "description": "Total size of deals in human readable format, i.e. 100 TiB", + "type": "string" + }, + "urlTemplate": { + "description": "URL template with PIECE_CID placeholder for boost to fetch the CAR file, i.e. http://127.0.0.1/piece/{PIECE_CID}.car", + "type": "string" + }, + "verified": { + "description": "Whether the deal should be verified", + "type": "boolean", + "default": true + } + } + }, + "schedule.UpdateRequest": { + "type": "object", + "properties": { + "allowedPieceCids": { + "description": "Allowed piece CIDs in this schedule", + "type": "array", + "items": { + "type": "string" + } + }, + "dealType": { + "description": "Deal type: market (f05) or pdp (f41)", + "type": "string" + }, + "duration": { + "description": "Duration in epoch or in duration format, i.e. 1500000, 2400h", + "type": "string", + "default": "12840h" + }, + "force": { + "description": "Force to send out deals regardless of replication restriction", + "type": "boolean" + }, + "httpHeaders": { + "description": "http headers to be passed with the request (i.e. key=value)", + "type": "array", + "items": { + "type": "string" + } + }, + "ipni": { + "description": "Whether the deal should be IPNI", + "type": "boolean", + "default": true + }, + "keepUnsealed": { + "description": "Whether the deal should be kept unsealed", + "type": "boolean", + "default": true + }, + "maxPendingDealNumber": { + "description": "Max pending deal number", + "type": "integer" + }, + "maxPendingDealSize": { + "description": "Max pending deal size in human readable format, i.e. 
100 TiB", + "type": "string" + }, + "notes": { + "description": "Notes", + "type": "string" + }, + "pricePerDeal": { + "description": "Price in FIL per deal", + "type": "number", + "default": 0 + }, + "pricePerGb": { + "description": "Price in FIL per GiB", + "type": "number", + "default": 0 + }, + "pricePerGbEpoch": { + "description": "Price in FIL per GiB per epoch", + "type": "number", + "default": 0 + }, + "scheduleCron": { + "description": "Schedule cron pattern", + "type": "string" + }, + "scheduleCronPerpetual": { + "description": "Whether a cron schedule should run indefinitely", + "type": "boolean" + }, + "scheduleDealNumber": { + "description": "Number of deals per scheduled time", + "type": "integer" + }, + "scheduleDealSize": { + "description": "Size of deals per schedule trigger in human readable format, i.e. 100 TiB", + "type": "string" + }, + "startDelay": { + "description": "Deal start delay in epoch or in duration format, i.e. 1000, 72h", + "type": "string", + "default": "72h" + }, + "totalDealNumber": { + "description": "Total number of deals", + "type": "integer" + }, + "totalDealSize": { + "description": "Total size of deals in human readable format, i.e. 100 TiB", + "type": "string" + }, + "urlTemplate": { + "description": "URL template with PIECE_CID placeholder for boost to fetch the CAR file, i.e. 
http://127.0.0.1/piece/{PIECE_CID}.car", + "type": "string" + }, + "verified": { + "description": "Whether the deal should be verified", + "type": "boolean", + "default": true + } + } + }, + "storage.DirEntry": { + "type": "object", + "properties": { + "dirId": { + "type": "string" + }, + "hash": { + "type": "string" + }, + "isDir": { + "type": "boolean" + }, + "lastModified": { + "type": "string" + }, + "numItems": { + "type": "integer" + }, + "path": { + "type": "string" + }, + "size": { + "type": "integer" + } + } + }, + "storage.RenameRequest": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + } + } + }, + "storage.azureblobConfig": { + "type": "object", + "properties": { + "accessTier": { + "description": "Access tier of blob: hot, cool, cold or archive.", + "type": "string" + }, + "account": { + "description": "Azure Storage Account Name.", + "type": "string" + }, + "archiveTierDelete": { + "description": "Delete archive tier blobs before overwriting.", + "type": "boolean", + "default": false + }, + "chunkSize": { + "description": "Upload chunk size.", + "type": "string", + "default": "4Mi" + }, + "clientCertificatePassword": { + "description": "Password for the certificate file (optional).", + "type": "string" + }, + "clientCertificatePath": { + "description": "Path to a PEM or PKCS12 certificate file including the private key.", + "type": "string" + }, + "clientId": { + "description": "The ID of the client in use.", + "type": "string" + }, + "clientSecret": { + "description": "One of the service principal's client secrets", + "type": "string" + }, + "clientSendCertificateChain": { + "description": "Send the certificate chain when using certificate auth.", + "type": "boolean", + "default": false + }, + "connectionString": { + "description": "Storage Connection String.", + "type": "string" + }, + "copyConcurrency": { + "description": "Concurrency for multipart copy.", + "type": "integer", + "default": 512 + 
}, + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "8Mi" + }, + "deleteSnapshots": { + "description": "Set to specify how to deal with snapshots on blob deletion.", + "type": "string", + "example": "" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false + }, + "disableInstanceDiscovery": { + "description": "Skip requesting Microsoft Entra instance metadata", + "type": "boolean", + "default": false + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8" + }, + "endpoint": { + "description": "Endpoint for the service.", + "type": "string" + }, + "envAuth": { + "description": "Read credentials from runtime (environment variables, CLI or MSI).", + "type": "boolean", + "default": false + }, + "key": { + "description": "Storage Account Shared Key.", + "type": "string" + }, + "listChunk": { + "description": "Size of blob list.", + "type": "integer", + "default": 5000 + }, + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", + "type": "string", + "default": "1m0s" + }, + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", + "type": "boolean", + "default": false + }, + "msiClientId": { + "description": "Object ID of the user-assigned MSI to use, if any.", + "type": "string" + }, + "msiMiResId": { + "description": "Azure resource ID of the user-assigned MSI to use, if any.", + "type": "string" + }, + "msiObjectId": { + "description": "Object ID of the user-assigned MSI to use, if any.", + "type": "string" + }, + "noCheckContainer": { + "description": "If set, don't attempt to check the container exists or create it.", + "type": "boolean", + "default": false + }, + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false + }, + "password": { + "description": "The user's password", + "type": "string" + }, + "publicAccess": { + "description": "Public access level of a container: blob or container.", + "type": "string", + "example": "" + }, + "sasUrl": { + "description": "SAS URL for container level access only.", + "type": "string" + }, + "servicePrincipalFile": { + "description": "Path to file containing credentials for use with a service principal.", + "type": "string" + }, + "tenant": { + "description": "ID of the service principal's tenant. 
Also called its directory ID.", + "type": "string" + }, + "uploadConcurrency": { + "description": "Concurrency for multipart uploads.", + "type": "integer", + "default": 16 + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload (\u003c= 256 MiB) (deprecated).", + "type": "string" + }, + "useAz": { + "description": "Use Azure CLI tool az for authentication", + "type": "boolean", + "default": false + }, + "useCopyBlob": { + "description": "Whether to use the Copy Blob API when copying to the same storage account.", + "type": "boolean", + "default": true + }, + "useEmulator": { + "description": "Uses local storage emulator if provided as 'true'.", + "type": "boolean", + "default": false + }, + "useMsi": { + "description": "Use a managed service identity to authenticate (only works in Azure).", + "type": "boolean", + "default": false + }, + "username": { + "description": "User name (usually an email address)", + "type": "string" + } + } + }, + "storage.b2Config": { + "type": "object", + "properties": { + "account": { + "description": "Account ID or Application Key ID.", + "type": "string" + }, + "chunkSize": { + "description": "Upload chunk size.", + "type": "string", + "default": "96Mi" + }, + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4Gi" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "disableChecksum": { + "description": "Disable checksums for large (\u003e upload cutoff) files.", + "type": "boolean", + "default": false + }, + "downloadAuthDuration": { + "description": "Time before the public link authorization token will expire in s or suffix ms|s|m|h|d.", + "type": "string", + "default": "1w" + }, + "downloadUrl": { + "description": "Custom endpoint for downloads.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": 
"Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" + }, + "endpoint": { + "description": "Endpoint for the service.", + "type": "string" + }, + "hardDelete": { + "description": "Permanently delete files on remote removal, otherwise hide files.", + "type": "boolean", + "default": false + }, + "key": { + "description": "Application Key.", + "type": "string" + }, + "lifecycle": { + "description": "Set the number of days deleted files should be kept when creating a bucket.", + "type": "integer", + "default": 0 + }, + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", + "type": "string", + "default": "1m0s" + }, + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", + "type": "boolean", + "default": false + }, + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in B2.", + "type": "string", + "example": "" + }, + "sseCustomerKey": { + "description": "To use SSE-C, you may provide the secret encryption key encoded in a UTF-8 compatible string to encrypt/decrypt your data", + "type": "string", + "example": "" + }, + "sseCustomerKeyBase64": { + "description": "To use SSE-C, you may provide the secret encryption key encoded in Base64 format to encrypt/decrypt your data", + "type": "string", + "example": "" + }, + "sseCustomerKeyMd5": { + "description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", + "type": "string", + "example": "" + }, + "testMode": { + "description": "A flag string for X-Bz-Test-Mode header for debugging.", + "type": "string" + }, + "uploadConcurrency": { + "description": "Concurrency for multipart uploads.", + "type": "integer", + "default": 4 + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" + }, + "versionAt": { + "description": "Show file versions 
as they were at the specified time.", + "type": "string", + "default": "off" + }, + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false + } + } + }, + "storage.boxConfig": { + "type": "object", + "properties": { + "accessToken": { + "description": "Box App Primary Access Token", + "type": "string" + }, + "authUrl": { + "description": "Auth server URL.", + "type": "string" + }, + "boxConfigFile": { + "description": "Box App config.json location", + "type": "string" + }, + "boxSubType": { + "type": "string", + "default": "user", + "example": "user" + }, + "clientCredentials": { + "description": "Use client credentials OAuth flow.", + "type": "boolean", + "default": false + }, + "clientId": { + "description": "OAuth Client Id.", + "type": "string" + }, + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" + }, + "commitRetries": { + "description": "Max number of times to try committing a multipart file.", + "type": "integer", + "default": 100 + }, + "configCredentials": { + "description": "Box App config.json contents.", + "type": "string" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot" + }, + "impersonate": { + "description": "Impersonate this user ID when using a service account.", + "type": "string" + }, + "listChunk": { + "description": "Size of listing chunk 1-1000.", + "type": "integer", + "default": 1000 + }, + "ownedBy": { + "description": "Only show items owned by the login (email address) passed in.", + "type": "string" + }, + "rootFolderId": { + "description": "Fill in for rclone to use a non root folder as its starting point.", + "type": "string", + "default": "0" + }, + "token": { + "description": "OAuth Access Token as a JSON blob.", + "type": "string" + }, + 
"tokenUrl": { + "description": "Token server url.", + "type": "string" + }, + "uploadCutoff": { + "description": "Cutoff for switching to multipart upload (\u003e= 50 MiB).", + "type": "string", + "default": "50Mi" + } + } + }, + "storage.createAzureblobStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.azureblobConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createB2StorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.b2Config" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createBoxStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.boxConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createDriveStorageRequest": { + "type": "object", + "properties": { + 
"clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.driveConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createDropboxStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.dropboxConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createFichierStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.fichierConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createFilefabricStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.filefabricConfig" + } + ] + }, + "name": 
{ + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createFtpStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.ftpConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createGcsStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.gcsConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createGphotosStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.gphotosConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createHdfsStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": 
"config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.hdfsConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createHidriveStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.hidriveConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createHttpStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.httpConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createInternetarchiveStorageRequest": { + "type": "object" + }, + "storage.createJottacloudStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": 
"#/definitions/storage.jottacloudConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createKoofrDigistorageStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.koofrDigistorageConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createKoofrKoofrStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.koofrKoofrConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createKoofrOtherStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.koofrOtherConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + 
"storage.createLocalStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.localConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createMailruStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.mailruConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createMegaStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.megaConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createNetstorageStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + 
"$ref": "#/definitions/storage.netstorageConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createOnedriveStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.onedriveConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createOosEnv_authStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.oosEnv_authConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createOosInstance_principal_authStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.oosInstance_principal_authConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": 
"string" + } + } + }, + "storage.createOosNo_authStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.oosNo_authConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createOosResource_principal_authStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.oosResource_principal_authConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createOosUser_principal_authStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.oosUser_principal_authConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createOosWorkload_identity_authStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": 
"#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.oosWorkload_identity_authConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createOpendriveStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.opendriveConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createPcloudStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.pcloudConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createPremiumizemeStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.premiumizemeConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": 
"string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createPutioStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.putioConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createQingstorStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.qingstorConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3AWSStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3AWSConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3AlibabaStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + 
"$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3AlibabaConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3ArvanCloudStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3ArvanCloudConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3BizflyCloudStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3BizflyCloudConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3CephStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3CephConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": 
"string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3ChinaMobileStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3ChinaMobileConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3CloudflareStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3CloudflareConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3CubbitStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3CubbitConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3DigitalOceanStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for 
underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3DigitalOceanConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3DreamhostStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3DreamhostConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3ExabaStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3ExabaConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3FileLuStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3FileLuConfig" + } + ] + }, + "name": { + "description": "Name of the 
storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3FlashBladeStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3FlashBladeConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3GCSStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3GCSConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3HetznerStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3HetznerConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3HuaweiOBSStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": 
"config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3HuaweiOBSConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3IBMCOSStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3IBMCOSConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3IDriveStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3IDriveConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3IONOSStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3IONOSConfig" + } + ] + }, + "name": { + "description": "Name of 
the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3IntercoloStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3IntercoloConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3LeviiaStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3LeviiaConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3LiaraStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3LiaraConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3LinodeStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": 
"config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3LinodeConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3LyveCloudStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3LyveCloudConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3MagaluStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3MagaluConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3MegaStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3MegaConfig" + } + ] + }, + "name": { + "description": "Name of 
the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3MinioStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3MinioConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3NeteaseStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3NeteaseConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3OVHcloudStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3OVHcloudConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3OtherStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": 
"config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3OtherConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3OutscaleStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3OutscaleConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3PetaboxStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3PetaboxConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3QiniuStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3QiniuConfig" + } + ] + }, + "name": { + "description": "Name of 
the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3RabataStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3RabataConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3RackCorpStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3RackCorpConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3RcloneStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3RcloneConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3ScalewayStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": 
"config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3ScalewayConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3SeaweedFSStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3SeaweedFSConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3SelectelStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3SelectelConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3ServercoreStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3ServercoreConfig" + } + ] + }, + "name": { + 
"description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3SpectraLogicStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3SpectraLogicConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3StackPathStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3StackPathConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3StorjStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3StorjConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3SynologyStorageRequest": { + "type": "object", + "properties": { 
+ "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3SynologyConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3TencentCOSStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3TencentCOSConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3WasabiStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3WasabiConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createS3ZataStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3ZataConfig" + } + ] + 
}, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createSeafileStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.seafileConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createSftpStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.sftpConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createSharefileStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.sharefileConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createSiaStorageRequest": { + "type": "object", + "properties": { + 
"clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.siaConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createSmbStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.smbConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createStorjExistingStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.storjExistingConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createStorjNewStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.storjNewConfig" + } + ] + }, + "name": { 
+ "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createSugarsyncStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.sugarsyncConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createSwiftStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.swiftConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createUnionStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.unionConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createWebdavStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + 
"description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.webdavConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createYandexStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.yandexConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.createZohoStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.zohoConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, + "storage.driveConfig": { + "type": "object", + "properties": { + "acknowledgeAbuse": { + "description": "Set to allow files which return cannotDownloadAbusiveFile to be downloaded.", + "type": "boolean", + "default": false + }, + "allowImportNameChange": { + "description": "Allow the filetype to change when uploading Google docs.", + "type": "boolean", + "default": false + }, + "alternateExport": { + 
"description": "Deprecated: No longer needed.", + "type": "boolean", + "default": false + }, + "authOwnerOnly": { + "description": "Only consider files owned by the authenticated user.", + "type": "boolean", + "default": false + }, + "authUrl": { + "description": "Auth server URL.", + "type": "string" + }, + "chunkSize": { + "description": "Upload chunk size.", + "type": "string", + "default": "8Mi" + }, + "clientCredentials": { + "description": "Use client credentials OAuth flow.", + "type": "boolean", + "default": false + }, + "clientId": { + "description": "Google Application Client Id", + "type": "string" + }, + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" + }, + "copyShortcutContent": { + "description": "Server side copy contents of shortcuts instead of the shortcut.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "disableHttp2": { + "description": "Disable drive using http2.", + "type": "boolean", + "default": true + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "InvalidUtf8" + }, + "envAuth": { + "description": "Get IAM credentials from runtime (environment variables or instance meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false + }, + "exportFormats": { + "description": "Comma separated list of preferred formats for downloading Google docs.", + "type": "string", + "default": "docx,xlsx,pptx,svg" + }, + "fastListBugFix": { + "description": "Work around a bug in Google Drive listing.", + "type": "boolean", + "default": true + }, + "formats": { + "description": "Deprecated: See export_formats.", + "type": "string" + }, + "impersonate": { + "description": "Impersonate this user when using a service account.", + "type": "string" + }, + "importFormats": { + "description": "Comma separated list of preferred formats for uploading Google docs.", + 
"type": "string" + }, + "keepRevisionForever": { + "description": "Keep new head revision of each file forever.", + "type": "boolean", + "default": false + }, + "listChunk": { + "description": "Size of listing chunk 100-1000, 0 to disable.", + "type": "integer", + "default": 1000 + }, + "metadataEnforceExpansiveAccess": { + "description": "Whether the request should enforce expansive access rules.", + "type": "boolean", + "default": false + }, + "metadataLabels": { + "description": "Control whether labels should be read or written in metadata.", + "type": "string", + "default": "off", + "example": "off" + }, + "metadataOwner": { + "description": "Control whether owner should be read or written in metadata.", + "type": "string", + "default": "read", + "example": "off" + }, + "metadataPermissions": { + "description": "Control whether permissions should be read or written in metadata.", + "type": "string", + "default": "off", + "example": "off" + }, + "pacerBurst": { + "description": "Number of API calls to allow without sleeping.", + "type": "integer", + "default": 100 + }, + "pacerMinSleep": { + "description": "Minimum time to sleep between API calls.", + "type": "string", + "default": "100ms" + }, + "resourceKey": { + "description": "Resource key for accessing a link-shared file.", + "type": "string" + }, + "rootFolderId": { + "description": "ID of the root folder.", + "type": "string" + }, + "scope": { + "description": "Comma separated list of scopes that rclone should use when requesting access from drive.", + "type": "string", + "example": "drive" + }, + "serverSideAcrossConfigs": { + "description": "Deprecated: use --server-side-across-configs instead.", + "type": "boolean", + "default": false + }, + "serviceAccountCredentials": { + "description": "Service Account Credentials JSON blob.", + "type": "string" + }, + "serviceAccountFile": { + "description": "Service Account Credentials JSON file path.", + "type": "string" + }, + "sharedWithMe": { + "description": 
"Only show files that are shared with me.", + "type": "boolean", + "default": false + }, + "showAllGdocs": { + "description": "Show all Google Docs including non-exportable ones in listings.", + "type": "boolean", + "default": false + }, + "sizeAsQuota": { + "description": "Show sizes as storage quota usage, not actual size.", + "type": "boolean", + "default": false + }, + "skipChecksumGphotos": { + "description": "Skip checksums on Google photos and videos only.", + "type": "boolean", + "default": false + }, + "skipDanglingShortcuts": { + "description": "If set skip dangling shortcut files.", + "type": "boolean", + "default": false + }, + "skipGdocs": { + "description": "Skip google documents in all listings.", + "type": "boolean", + "default": false + }, + "skipShortcuts": { + "description": "If set skip shortcut files.", + "type": "boolean", + "default": false + }, + "starredOnly": { + "description": "Only show files that are starred.", + "type": "boolean", + "default": false + }, + "stopOnDownloadLimit": { + "description": "Make download limit errors be fatal.", + "type": "boolean", + "default": false + }, + "stopOnUploadLimit": { + "description": "Make upload limit errors be fatal.", + "type": "boolean", + "default": false + }, + "teamDrive": { + "description": "ID of the Shared Drive (Team Drive).", + "type": "string" + }, + "token": { + "description": "OAuth Access Token as a JSON blob.", + "type": "string" + }, + "tokenUrl": { + "description": "Token server url.", + "type": "string" + }, + "trashedOnly": { + "description": "Only show files that are in the trash.", + "type": "boolean", + "default": false + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "8Mi" + }, + "useCreatedDate": { + "description": "Use file created date instead of modified date.", + "type": "boolean", + "default": false + }, + "useSharedDate": { + "description": "Use date file was shared instead of modified date.", + 
"type": "boolean", + "default": false + }, + "useTrash": { + "description": "Send files to the trash instead of deleting permanently.", + "type": "boolean", + "default": true + }, + "v2DownloadMinSize": { + "description": "If Object's are greater, use drive v2 API to download.", + "type": "string", + "default": "off" + } + } + }, + "storage.dropboxConfig": { + "type": "object", + "properties": { + "authUrl": { + "description": "Auth server URL.", + "type": "string" + }, + "batchCommitTimeout": { + "description": "Max time to wait for a batch to finish committing. (no longer used)", + "type": "string", + "default": "10m0s" + }, + "batchMode": { + "description": "Upload file batching sync|async|off.", + "type": "string", + "default": "sync" + }, + "batchSize": { + "description": "Max number of files in upload batch.", + "type": "integer", + "default": 0 + }, + "batchTimeout": { + "description": "Max time to allow an idle upload batch before uploading.", + "type": "string", + "default": "0s" + }, + "chunkSize": { + "description": "Upload chunk size (\u003c 150Mi).", + "type": "string", + "default": "48Mi" + }, + "clientCredentials": { + "description": "Use client credentials OAuth flow.", + "type": "boolean", + "default": false + }, + "clientId": { + "description": "OAuth Client Id.", + "type": "string" + }, + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot" + }, + "exportFormats": { + "description": "Comma separated list of preferred formats for exporting files", + "type": "string", + "default": "html,md" + }, + "impersonate": { + "description": "Impersonate this user when using a business account.", + "type": "string" + }, + "pacerMinSleep": { + "description": "Minimum time to sleep between API 
calls.", + "type": "string", + "default": "10ms" + }, + "rootNamespace": { + "description": "Specify a different Dropbox namespace ID to use as the root for all paths.", + "type": "string" + }, + "sharedFiles": { + "description": "Instructs rclone to work on individual shared files.", + "type": "boolean", + "default": false + }, + "sharedFolders": { + "description": "Instructs rclone to work on shared folders.", + "type": "boolean", + "default": false + }, + "showAllExports": { + "description": "Show all exportable files in listings.", + "type": "boolean", + "default": false + }, + "skipExports": { + "description": "Skip exportable files in all listings.", + "type": "boolean", + "default": false + }, + "token": { + "description": "OAuth Access Token as a JSON blob.", + "type": "string" + }, + "tokenUrl": { + "description": "Token server url.", "type": "string" } } }, - "api.HTTPError": { + "storage.fichierConfig": { "type": "object", "properties": { - "err": { + "apiKey": { + "description": "Your API Key, get it from https://1fichier.com/console/params.pl.", + "type": "string" + }, + "cdn": { + "description": "Set if you wish to use CDN download links.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot" + }, + "filePassword": { + "description": "If you want to download a shared file that is password protected, add this parameter.", + "type": "string" + }, + "folderPassword": { + "description": "If you want to list the files in a shared folder that is password protected, add this parameter.", + "type": "string" + }, + "sharedFolder": { + "description": "If you want to download a shared folder, add this parameter.", + "type": "string" + } + } + }, + "storage.filefabricConfig": { 
+ "type": "object", + "properties": { + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,Del,Ctl,InvalidUtf8,Dot" + }, + "permanentToken": { + "description": "Permanent Authentication Token.", + "type": "string" + }, + "rootFolderId": { + "description": "ID of the root folder.", + "type": "string" + }, + "token": { + "description": "Session Token.", + "type": "string" + }, + "tokenExpiry": { + "description": "Token expiry time.", + "type": "string" + }, + "url": { + "description": "URL of the Enterprise File Fabric to connect to.", + "type": "string", + "example": "https://storagemadeeasy.com" + }, + "version": { + "description": "Version read from the file fabric.", + "type": "string" + } + } + }, + "storage.ftpConfig": { + "type": "object", + "properties": { + "allowInsecureTlsCiphers": { + "description": "Allow insecure TLS ciphers", + "type": "boolean", + "default": false + }, + "askPassword": { + "description": "Allow asking for FTP password when needed.", + "type": "boolean", + "default": false + }, + "closeTimeout": { + "description": "Maximum time to wait for a response to close.", + "type": "string", + "default": "1m0s" + }, + "concurrency": { + "description": "Maximum number of FTP simultaneous connections, 0 for unlimited.", + "type": "integer", + "default": 0 + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "disableEpsv": { + "description": "Disable using EPSV even if server advertises support.", + "type": "boolean", + "default": false + }, + "disableMlsd": { + "description": "Disable using MLSD even if server advertises support.", + "type": "boolean", + "default": false + }, + "disableTls13": { + "description": "Disable TLS 1.3 (workaround for FTP servers with buggy TLS)", + "type": "boolean", + "default": false + }, + "disableUtf8": { + "description": "Disable 
using UTF-8 even if server advertises support.", + "type": "boolean", + "default": false + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,Del,Ctl,RightSpace,Dot", + "example": "Asterisk,Ctl,Dot,Slash" + }, + "explicitTls": { + "description": "Use Explicit FTPS (FTP over TLS).", + "type": "boolean", + "default": false + }, + "forceListHidden": { + "description": "Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD.", + "type": "boolean", + "default": false + }, + "host": { + "description": "FTP host to connect to.", + "type": "string" + }, + "httpProxy": { + "description": "URL for HTTP CONNECT proxy", + "type": "string" + }, + "idleTimeout": { + "description": "Max time before closing idle connections.", + "type": "string", + "default": "1m0s" + }, + "noCheckCertificate": { + "description": "Do not verify the TLS certificate of the server.", + "type": "boolean", + "default": false + }, + "noCheckUpload": { + "description": "Don't check the upload is OK", + "type": "boolean", + "default": false + }, + "pass": { + "description": "FTP password.", + "type": "string" + }, + "port": { + "description": "FTP port number.", + "type": "integer", + "default": 21 + }, + "shutTimeout": { + "description": "Maximum time to wait for data connection closing status.", + "type": "string", + "default": "1m0s" + }, + "socksProxy": { + "description": "Socks 5 proxy host.", "type": "string" + }, + "tls": { + "description": "Use Implicit FTPS (FTP over TLS).", + "type": "boolean", + "default": false + }, + "tlsCacheSize": { + "description": "Size of TLS session cache for all control and data connections.", + "type": "integer", + "default": 32 + }, + "user": { + "description": "FTP username.", + "type": "string", + "default": "$USER" + }, + "writingMdtm": { + "description": "Use MDTM to set modification time (VsFtpd quirk)", + "type": "boolean", + "default": false } } }, - 
"dataprep.AddPieceRequest": { + "storage.gcsConfig": { "type": "object", - "required": [ - "pieceCid" - ], "properties": { - "fileSize": { - "description": "File size of the CAR file, this is required for boost online deal", - "type": "integer" + "accessToken": { + "description": "Short-lived access token.", + "type": "string" }, - "pieceCid": { - "description": "CID of the piece", + "anonymous": { + "description": "Access public buckets and objects without credentials.", + "type": "boolean", + "default": false + }, + "authUrl": { + "description": "Auth server URL.", "type": "string" }, - "pieceSize": { - "description": "Size of the piece (required for external import, optional if piece exists in DB)", + "bucketAcl": { + "description": "Access Control List for new buckets.", + "type": "string", + "example": "authenticatedRead" + }, + "bucketPolicyOnly": { + "description": "Access checks should use bucket-level IAM policies.", + "type": "boolean", + "default": false + }, + "clientCredentials": { + "description": "Use client credentials OAuth flow.", + "type": "boolean", + "default": false + }, + "clientId": { + "description": "OAuth Client Id.", "type": "string" }, - "rootCid": { - "description": "Root CID of the CAR file, used to populate the label field of storage deal", + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" + }, + "decompress": { + "description": "If set this will decompress gzip encoded objects.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,CrLf,InvalidUtf8,Dot" + }, + "endpoint": { + "description": "Custom endpoint for the storage API. 
Leave blank to use the provider default.", + "type": "string", + "example": "storage.example.org" + }, + "envAuth": { + "description": "Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false + }, + "location": { + "description": "Location for the newly created buckets.", + "type": "string", + "example": "" + }, + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false + }, + "objectAcl": { + "description": "Access Control List for new objects.", + "type": "string", + "example": "authenticatedRead" + }, + "projectNumber": { + "description": "Project number.", + "type": "string" + }, + "serviceAccountCredentials": { + "description": "Service Account Credentials JSON blob.", + "type": "string" + }, + "serviceAccountFile": { + "description": "Service Account Credentials JSON file path.", + "type": "string" + }, + "storageClass": { + "description": "The storage class to use when storing objects in Google Cloud Storage.", + "type": "string", + "example": "" + }, + "token": { + "description": "OAuth Access Token as a JSON blob.", + "type": "string" + }, + "tokenUrl": { + "description": "Token server url.", + "type": "string" + }, + "userProject": { + "description": "User project.", "type": "string" } } }, - "dataprep.CreateRequest": { + "storage.gphotosConfig": { "type": "object", - "required": [ - "name" - ], "properties": { - "deleteAfterExport": { - "description": "Whether to delete the source files after export", + "authUrl": { + "description": "Auth server URL.", + "type": "string" + }, + "batchCommitTimeout": { + "description": "Max time to wait for a batch to finish committing. 
(no longer used)", + "type": "string", + "default": "10m0s" + }, + "batchMode": { + "description": "Upload file batching sync|async|off.", + "type": "string", + "default": "sync" + }, + "batchSize": { + "description": "Max number of files in upload batch.", + "type": "integer", + "default": 0 + }, + "batchTimeout": { + "description": "Max time to allow an idle upload batch before uploading.", + "type": "string", + "default": "0s" + }, + "clientCredentials": { + "description": "Use client credentials OAuth flow.", "type": "boolean", "default": false }, - "maxSize": { - "description": "Maximum size of the CAR files to be created", - "type": "string", - "default": "31.5GiB" + "clientId": { + "description": "OAuth Client Id.", + "type": "string" }, - "minPieceSize": { - "description": "Minimum piece size for the preparation, applies only to DAG and remainer pieces", + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", "type": "string", - "default": "1MiB" + "default": "Slash,CrLf,InvalidUtf8,Dot" }, - "name": { - "description": "Name of the preparation", + "includeArchived": { + "description": "Also view and download archived media.", + "type": "boolean", + "default": false + }, + "proxy": { + "description": "Use the gphotosdl proxy for downloading the full resolution images", "type": "string" }, - "noDag": { - "description": "Whether to disable maintaining folder dag structure for the sources. If disabled, DagGen will not be possible and folders will not have an associated CID.", + "readOnly": { + "description": "Set to make the Google Photos backend read only.", "type": "boolean", "default": false }, - "noInline": { - "description": "Whether to disable inline storage for the preparation. 
Can save database space but requires at least one output storage.", + "readSize": { + "description": "Set to read the size of media items.", "type": "boolean", "default": false }, - "outputStorages": { - "description": "Name of Output storage systems to be used for the output", - "type": "array", - "items": { - "type": "string" - } + "startYear": { + "description": "Year limits the photos to be downloaded to those which are uploaded after the given year.", + "type": "integer", + "default": 2000 }, - "pieceSize": { - "description": "Target piece size of the CAR files used for piece commitment calculation", + "token": { + "description": "OAuth Access Token as a JSON blob.", "type": "string" }, - "sourceStorages": { - "description": "Name of Source storage systems to be used for the source", - "type": "array", - "items": { - "type": "string" - } + "tokenUrl": { + "description": "Token server url.", + "type": "string" } } }, - "dataprep.DeletePieceRequest": { + "storage.hdfsConfig": { "type": "object", "properties": { - "deleteCar": { - "description": "Delete the physical CAR file from storage (default: true)", - "type": "boolean" + "dataTransferProtection": { + "description": "Kerberos data transfer protection: authentication|integrity|privacy.", + "type": "string", + "example": "privacy" }, - "force": { - "description": "Delete even if deals reference this piece", - "type": "boolean" + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,Colon,Del,Ctl,InvalidUtf8,Dot" + }, + "namenode": { + "description": "Hadoop name nodes and ports.", + "type": "string" + }, + "servicePrincipalName": { + "description": "Kerberos service principal name for the namenode.", + "type": "string" + }, + "username": { + "description": "Hadoop user name.", + "type": "string", + "example": "root" } } }, - "dataprep.DirEntry": { + 
"storage.hidriveConfig": { "type": "object", "properties": { - "cid": { + "authUrl": { + "description": "Auth server URL.", "type": "string" }, - "fileVersions": { - "type": "array", - "items": { - "$ref": "#/definitions/dataprep.Version" - } + "chunkSize": { + "description": "Chunksize for chunked uploads.", + "type": "string", + "default": "48Mi" }, - "isDir": { - "type": "boolean" + "clientCredentials": { + "description": "Use client credentials OAuth flow.", + "type": "boolean", + "default": false }, - "path": { + "clientId": { + "description": "OAuth Client Id.", "type": "string" - } - } - }, - "dataprep.ExploreResult": { - "type": "object", - "properties": { - "cid": { + }, + "clientSecret": { + "description": "OAuth Client Secret.", "type": "string" }, - "path": { + "description": { + "description": "Description of the remote.", "type": "string" }, - "subEntries": { - "type": "array", - "items": { - "$ref": "#/definitions/dataprep.DirEntry" - } - } - } - }, - "dataprep.PieceList": { - "type": "object", - "properties": { - "attachmentId": { - "type": "integer" + "disableFetchingMemberCount": { + "description": "Do not fetch number of objects in directories unless it is absolutely necessary.", + "type": "boolean", + "default": false }, - "pieces": { - "type": "array", - "items": { - "$ref": "#/definitions/model.Car" - } + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,Dot" }, - "source": { - "$ref": "#/definitions/model.Storage" + "endpoint": { + "description": "Endpoint for the service.", + "type": "string", + "default": "https://api.hidrive.strato.com/2.1" }, - "storageId": { - "type": "integer" - } - } - }, - "dataprep.RemoveRequest": { - "type": "object", - "properties": { - "removeCars": { - "type": "boolean" - } - } - }, - "dataprep.RenameRequest": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string" - } - } - }, - "dataprep.Version": { - "type": 
"object", - "properties": { - "cid": { - "type": "string" + "rootPrefix": { + "description": "The root/parent folder for all paths.", + "type": "string", + "default": "/", + "example": "/" }, - "hash": { - "type": "string" + "scopeAccess": { + "description": "Access permissions that rclone should use when requesting access from HiDrive.", + "type": "string", + "default": "rw", + "example": "rw" }, - "id": { - "type": "integer" + "scopeRole": { + "description": "User-level that rclone should use when requesting access from HiDrive.", + "type": "string", + "default": "user", + "example": "user" }, - "lastModified": { + "token": { + "description": "OAuth Access Token as a JSON blob.", "type": "string" }, - "size": { - "type": "integer" + "tokenUrl": { + "description": "Token server url.", + "type": "string" + }, + "uploadConcurrency": { + "description": "Concurrency for chunked uploads.", + "type": "integer", + "default": 4 + }, + "uploadCutoff": { + "description": "Cutoff/Threshold for chunked uploads.", + "type": "string", + "default": "96Mi" } } }, - "deal.ListDealRequest": { + "storage.httpConfig": { "type": "object", "properties": { - "dealTypes": { - "description": "deal type filter (market for f05, pdp for f41)", - "type": "array", - "items": { - "$ref": "#/definitions/model.DealType" - } + "description": { + "description": "Description of the remote.", + "type": "string" }, - "preparations": { - "description": "preparation ID or name filter", - "type": "array", - "items": { - "type": "string" - } + "headers": { + "description": "Set HTTP headers for all transactions.", + "type": "string" }, - "providers": { - "description": "provider filter", - "type": "array", - "items": { - "type": "string" - } + "noEscape": { + "description": "Do not escape URL metacharacters in path names.", + "type": "boolean", + "default": false }, - "schedules": { - "description": "schedule id filter", - "type": "array", - "items": { - "type": "integer" - } + "noHead": { + 
"description": "Don't use HEAD requests.", + "type": "boolean", + "default": false }, - "sources": { - "description": "source ID or name filter", - "type": "array", - "items": { - "type": "string" - } + "noSlash": { + "description": "Set this if the site doesn't end directories with /.", + "type": "boolean", + "default": false }, - "states": { - "description": "state filter", - "type": "array", - "items": { - "$ref": "#/definitions/model.DealState" - } + "url": { + "description": "URL of HTTP host to connect to.", + "type": "string" } } }, - "deal.Proposal": { + "storage.jottacloudConfig": { "type": "object", "properties": { - "clientAddress": { - "description": "Client address", + "authUrl": { + "description": "Auth server URL.", "type": "string" }, - "duration": { - "description": "Duration in epoch or in duration format, i.e. 1500000, 2400h", - "type": "string", - "default": "12740h" - }, - "fileSize": { - "description": "File size in bytes for boost to fetch the CAR file", - "type": "integer" - }, - "httpHeaders": { - "description": "http headers to be passed with the request (i.e. 
key=value)", - "type": "array", - "items": { - "type": "string" - } - }, - "ipni": { - "description": "Whether the deal should be IPNI", - "type": "boolean", - "default": true - }, - "keepUnsealed": { - "description": "Whether the deal should be kept unsealed", + "clientCredentials": { + "description": "Use client credentials OAuth flow.", "type": "boolean", - "default": true + "default": false }, - "pieceCid": { - "description": "Piece CID", + "clientId": { + "description": "OAuth Client Id.", "type": "string" }, - "pieceSize": { - "description": "Piece size", + "clientSecret": { + "description": "OAuth Client Secret.", "type": "string" }, - "pricePerDeal": { - "description": "Price in FIL per deal", - "type": "number", - "default": 0 - }, - "pricePerGb": { - "description": "Price in FIL per GiB", - "type": "number", - "default": 0 + "description": { + "description": "Description of the remote.", + "type": "string" }, - "pricePerGbEpoch": { - "description": "Price in FIL per GiB per epoch", - "type": "number", - "default": 0 + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot" }, - "providerId": { - "description": "Provider ID", - "type": "string" + "hardDelete": { + "description": "Delete files permanently rather than putting them into the trash.", + "type": "boolean", + "default": false }, - "rootCid": { - "description": "Root CID that is required as part of the deal proposal, if empty, will be set to empty CID", + "md5MemoryLimit": { + "description": "Files bigger than this will be cached on disk to calculate the MD5 if required.", "type": "string", - "default": "bafkqaaa" + "default": "10Mi" }, - "startDelay": { - "description": "Deal start delay in epoch or in duration format, i.e. 
1000, 72h", - "type": "string", - "default": "72h" + "noVersions": { + "description": "Avoid server side versioning by deleting files and recreating files instead of overwriting them.", + "type": "boolean", + "default": false }, - "urlTemplate": { - "description": "URL template with PIECE_CID placeholder for boost to fetch the CAR file, i.e. http://127.0.0.1/piece/{PIECE_CID}.car", + "token": { + "description": "OAuth Access Token as a JSON blob.", "type": "string" }, - "verified": { - "description": "Whether the deal should be verified", + "tokenUrl": { + "description": "Token server url.", + "type": "string" + }, + "trashedOnly": { + "description": "Only show files that are in the trash.", "type": "boolean", - "default": true - } - } - }, - "file.DealsForFileRange": { - "type": "object", - "properties": { - "deals": { - "type": "array", - "items": { - "$ref": "#/definitions/model.Deal" - } + "default": false }, - "fileRange": { - "$ref": "#/definitions/model.FileRange" + "uploadResumeLimit": { + "description": "Files bigger than this can be resumed if the upload fail's.", + "type": "string", + "default": "10Mi" } } }, - "file.Info": { + "storage.koofrDigistorageConfig": { "type": "object", "properties": { - "path": { - "description": "Path to the new file, relative to the source", + "description": { + "description": "Description of the remote.", "type": "string" - } - } - }, - "job.SourceStatus": { - "type": "object", - "properties": { - "attachmentId": { - "type": "integer" }, - "jobs": { - "type": "array", - "items": { - "$ref": "#/definitions/model.Job" - } + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" }, - "output": { - "type": "array", - "items": { - "$ref": "#/definitions/model.Storage" - } + "mountid": { + "description": "Mount ID of the mount to use.", + "type": "string" }, - "source": { - "$ref": "#/definitions/model.Storage" + "password": { + "description": 
"Your password for rclone generate one at https://storage.rcs-rds.ro/app/admin/preferences/password.", + "type": "string" }, - "storageId": { - "type": "integer" + "setmtime": { + "description": "Does the backend support setting modification time.", + "type": "boolean", + "default": true + }, + "user": { + "description": "Your user name.", + "type": "string" } } }, - "model.Car": { + "storage.koofrKoofrConfig": { "type": "object", "properties": { - "attachmentId": { - "type": "integer" - }, - "createdAt": { + "description": { + "description": "Description of the remote.", "type": "string" }, - "fileSize": { - "type": "integer" - }, - "id": { - "type": "integer" + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" }, - "jobId": { - "type": "integer" + "mountid": { + "description": "Mount ID of the mount to use.", + "type": "string" }, - "minPieceSizePadding": { - "description": "MinPieceSizePadding tracks virtual padding for inline mode only. Inline: stores padding amount, PieceReader serves zeros virtually. 
Non-inline: always 0, literal zeros are written to CAR file for Curio TreeD compatibility.", - "type": "integer" + "password": { + "description": "Your password for rclone generate one at https://app.koofr.net/app/admin/preferences/password.", + "type": "string" }, - "numOfFiles": { - "type": "integer" + "setmtime": { + "description": "Does the backend support setting modification time.", + "type": "boolean", + "default": true }, - "pieceCid": { + "user": { + "description": "Your user name.", + "type": "string" + } + } + }, + "storage.koofrOtherConfig": { + "type": "object", + "properties": { + "description": { + "description": "Description of the remote.", "type": "string" }, - "pieceSize": { - "type": "integer" + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" }, - "pieceType": { - "description": "PieceType indicates whether this is a data piece or DAG piece", + "endpoint": { + "description": "The Koofr API endpoint to use.", "type": "string" }, - "preparationId": { - "description": "Association - SET NULL for fast prep deletion, async cleanup", - "type": "integer" + "mountid": { + "description": "Mount ID of the mount to use.", + "type": "string" }, - "rootCid": { + "password": { + "description": "Your password for rclone (generate one at your service's settings page).", "type": "string" }, - "storageId": { - "type": "integer" + "setmtime": { + "description": "Does the backend support setting modification time.", + "type": "boolean", + "default": true }, - "storagePath": { - "description": "StoragePath is the path to the CAR file inside the storage. 
If the StorageID is nil but StoragePath is not empty, it means the CAR file is stored at the local absolute path.", + "user": { + "description": "Your user name.", "type": "string" } } }, - "model.ClientConfig": { + "storage.localConfig": { "type": "object", "properties": { - "caCert": { - "description": "Paths to CA certificate used to verify servers", - "type": "array", - "items": { - "type": "string" - } - }, - "clientCert": { - "description": "Path to Client SSL certificate (PEM) for mutual TLS auth", - "type": "string" + "caseInsensitive": { + "description": "Force the filesystem to report itself as case insensitive.", + "type": "boolean", + "default": false }, - "clientKey": { - "description": "Path to Client SSL private key (PEM) for mutual TLS auth", - "type": "string" + "caseSensitive": { + "description": "Force the filesystem to report itself as case sensitive.", + "type": "boolean", + "default": false }, - "connectTimeout": { - "description": "HTTP Client Connect timeout", - "type": "integer" + "copyLinks": { + "description": "Follow symlinks and copy the pointed to item.", + "type": "boolean", + "default": false }, - "disableHttp2": { - "description": "Disable HTTP/2 in the transport", - "type": "boolean" + "description": { + "description": "Description of the remote.", + "type": "string" }, - "disableHttpKeepAlives": { - "description": "Disable HTTP keep-alives and use each connection once.", - "type": "boolean" + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,Dot" }, - "expectContinueTimeout": { - "description": "Timeout when using expect / 100-continue in HTTP", - "type": "integer" + "hashes": { + "description": "Comma separated list of supported checksum types.", + "type": "string" }, - "headers": { - "description": "Set HTTP header for all transactions", - "type": "object", - "additionalProperties": { - "type": "string" - } + "links": { + "description": "Translate symlinks to/from regular 
files with a '.rclonelink' extension for the local backend.", + "type": "boolean", + "default": false }, - "insecureSkipVerify": { - "description": "Do not verify the server SSL certificate (insecure)", - "type": "boolean" + "noCheckUpdated": { + "description": "Don't check to see if the files change during upload.", + "type": "boolean", + "default": false }, - "lowlevelRetries": { - "description": "Maximum number of retries for low-level client errors. Default is 10 retries.", - "type": "integer" + "noClone": { + "description": "Disable reflink cloning for server-side copies.", + "type": "boolean", + "default": false }, - "noGzip": { - "description": "Don't set Accept-Encoding: gzip", - "type": "boolean" + "noPreallocate": { + "description": "Disable preallocation of disk space for transferred files.", + "type": "boolean", + "default": false }, - "retryBackoff": { - "description": "Constant backoff between retries. Default is 1s.", - "type": "integer" + "noSetModtime": { + "description": "Disable setting modtime.", + "type": "boolean", + "default": false }, - "retryBackoffExponential": { - "description": "Exponential backoff between retries. Default is 1.0.", - "type": "number" + "noSparse": { + "description": "Disable sparse files for multi-thread downloads.", + "type": "boolean", + "default": false }, - "retryDelay": { - "description": "Delay between retries. Default is 1s.", - "type": "integer" + "nounc": { + "description": "Disable UNC (long path names) conversion on Windows.", + "type": "boolean", + "default": false, + "example": true }, - "retryMaxCount": { - "description": "Maximum number of retries. Default is 10 retries.", - "type": "integer" + "oneFileSystem": { + "description": "Don't cross filesystem boundaries (unix/macOS only).", + "type": "boolean", + "default": false }, - "scanConcurrency": { - "description": "Maximum number of concurrent scan requests. 
Default is 1.", - "type": "integer" + "skipLinks": { + "description": "Don't warn about skipped symlinks.", + "type": "boolean", + "default": false }, - "skipInaccessibleFile": { - "description": "Skip inaccessible files. Default is false.", - "type": "boolean" + "skipSpecials": { + "description": "Don't warn about skipped pipes, sockets and device objects.", + "type": "boolean", + "default": false }, - "timeout": { - "description": "IO idle timeout", - "type": "integer" + "timeType": { + "description": "Set what kind of time is returned.", + "type": "string", + "default": "mtime", + "example": "mtime" }, - "useServerModTime": { - "description": "Use server modified time instead of object metadata", - "type": "boolean" + "unicodeNormalization": { + "description": "Apply unicode NFC normalization to paths and filenames.", + "type": "boolean", + "default": false }, - "userAgent": { - "description": "Set the user-agent to a specified string", - "type": "string" + "zeroSizeLinks": { + "description": "Assume the Stat size of links is zero (and read them instead) (deprecated).", + "type": "boolean", + "default": false } } }, - "model.ConfigMap": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "model.Deal": { + "storage.mailruConfig": { "type": "object", "properties": { + "authUrl": { + "description": "Auth server URL.", + "type": "string" + }, + "checkHash": { + "description": "What should copy do if file checksum is mismatched or invalid.", + "type": "boolean", + "default": true, + "example": true + }, + "clientCredentials": { + "description": "Use client credentials OAuth flow.", + "type": "boolean", + "default": false + }, "clientId": { + "description": "OAuth Client Id.", "type": "string" }, - "createdAt": { + "clientSecret": { + "description": "OAuth Client Secret.", "type": "string" }, - "dealId": { - "type": "integer" + "description": { + "description": "Description of the remote.", + "type": "string" }, - "dealType": { - "$ref": 
"#/definitions/model.DealType" + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot" }, - "endEpoch": { - "type": "integer" + "pass": { + "description": "Password.", + "type": "string" }, - "errorMessage": { + "quirks": { + "description": "Comma separated list of internal maintenance flags.", "type": "string" }, - "id": { - "type": "integer" + "speedupEnable": { + "description": "Skip full upload if there is another file with same data hash.", + "type": "boolean", + "default": true, + "example": true }, - "label": { + "speedupFilePatterns": { + "description": "Comma separated list of file name patterns eligible for speedup (put by hash).", + "type": "string", + "default": "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf", + "example": "" + }, + "speedupMaxDisk": { + "description": "This option allows you to disable speedup (put by hash) for large files.", + "type": "string", + "default": "3Gi", + "example": "0" + }, + "speedupMaxMemory": { + "description": "Files larger than the size given below will always be hashed on disk.", + "type": "string", + "default": "32Mi", + "example": "0" + }, + "token": { + "description": "OAuth Access Token as a JSON blob.", "type": "string" }, - "lastVerifiedAt": { - "description": "LastVerifiedAt is the last time the deal was verified as active by the tracker", + "tokenUrl": { + "description": "Token server url.", "type": "string" }, - "nextChallengeEpoch": { - "description": "NextChallengeEpoch is the next epoch when a challenge proof is due", - "type": "integer" + "user": { + "description": "User name (usually email).", + "type": "string" }, - "pieceCid": { + "userAgent": { + "description": "HTTP user agent used internally by client.", + "type": "string" + } + } + }, + "storage.megaConfig": { + "type": "object", + "properties": { + "2fa": { + "description": "The 2FA code of your MEGA account if the 
account is set up with one", "type": "string" }, - "pieceSize": { - "type": "integer" + "debug": { + "description": "Output more debug from Mega.", + "type": "boolean", + "default": false }, - "price": { + "description": { + "description": "Description of the remote.", "type": "string" }, - "proofSetId": { - "description": "PDP-specific fields (only populated for DealTypePDP)", - "type": "integer" + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" }, - "proofSetLive": { - "description": "ProofSetLive indicates if the proof set is live (actively being challenged)", - "type": "boolean" + "hardDelete": { + "description": "Delete files permanently rather than putting them into the trash.", + "type": "boolean", + "default": false }, - "proposalId": { + "masterKey": { + "description": "Master key (internal use only)", "type": "string" }, - "provider": { + "pass": { + "description": "Password.", "type": "string" }, - "scheduleId": { - "description": "Associations", - "type": "integer" + "sessionId": { + "description": "Session (internal use only)", + "type": "string" }, - "sectorStartEpoch": { - "type": "integer" + "useHttps": { + "description": "Use HTTPS for transfers.", + "type": "boolean", + "default": false }, - "startEpoch": { - "type": "integer" + "user": { + "description": "User name.", + "type": "string" + } + } + }, + "storage.netstorageConfig": { + "type": "object", + "properties": { + "account": { + "description": "Set the NetStorage account name", + "type": "string" }, - "state": { - "$ref": "#/definitions/model.DealState" + "description": { + "description": "Description of the remote.", + "type": "string" }, - "updatedAt": { + "host": { + "description": "Domain+path of NetStorage host to connect to.", "type": "string" }, - "verified": { - "type": "boolean" + "protocol": { + "description": "Select between HTTP or HTTPS protocol.", + "type": "string", + "default": "https", + "example": 
"http" }, - "walletId": { - "type": "integer" + "secret": { + "description": "Set the NetStorage account secret/G2O key for authentication.", + "type": "string" } } }, - "model.DealState": { - "type": "string", - "enum": [ - "proposed", - "published", - "active", - "expired", - "proposal_expired", - "rejected", - "slashed", - "error" - ], - "x-enum-varnames": [ - "DealProposed", - "DealPublished", - "DealActive", - "DealExpired", - "DealProposalExpired", - "DealRejected", - "DealSlashed", - "DealErrored" - ] - }, - "model.DealType": { - "type": "string", - "enum": [ - "market", - "pdp" - ], - "x-enum-varnames": [ - "DealTypeMarket", - "DealTypePDP" - ] - }, - "model.File": { + "storage.onedriveConfig": { "type": "object", - "properties": { - "attachmentId": { - "description": "Associations - AttachmentID SET NULL for fast prep deletion, async cleanup", - "type": "integer" + "properties": { + "accessScopes": { + "description": "Set scopes to be requested by rclone.", + "type": "string", + "default": "Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access", + "example": "Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access" }, - "cid": { - "description": "CID is the CID of the file.", + "authUrl": { + "description": "Auth server URL.", "type": "string" }, - "directoryId": { - "type": "integer" + "avOverride": { + "description": "Allows download of files the server thinks has a virus.", + "type": "boolean", + "default": false }, - "fileRanges": { - "type": "array", - "items": { - "$ref": "#/definitions/model.FileRange" - } + "chunkSize": { + "description": "Chunk size to upload files with - must be multiple of 320k (327,680 bytes).", + "type": "string", + "default": "10Mi" }, - "hash": { - "description": "Hash is the hash of the file.", + "clientCredentials": { + "description": "Use client credentials OAuth flow.", + "type": "boolean", + "default": false + }, + "clientId": { + "description": 
"OAuth Client Id.", "type": "string" }, - "id": { - "type": "integer" + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" }, - "lastModifiedNano": { - "type": "integer" + "delta": { + "description": "If set rclone will use delta listing to implement recursive listings.", + "type": "boolean", + "default": false }, - "path": { - "description": "Path is the relative path to the file inside the storage.", + "description": { + "description": "Description of the remote.", "type": "string" }, - "size": { - "description": "Size is the size of the file in bytes.", - "type": "integer" - } - } - }, - "model.FileRange": { - "type": "object", - "properties": { - "cid": { - "description": "CID is the CID of the range.", + "disableSitePermission": { + "description": "Disable the request for Sites.Read.All permission.", + "type": "boolean", + "default": false + }, + "driveId": { + "description": "The ID of the drive to use.", "type": "string" }, - "fileId": { - "type": "integer" + "driveType": { + "description": "The type of the drive (personal | business | documentLibrary).", + "type": "string" }, - "id": { - "type": "integer" + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot" }, - "jobId": { - "description": "Associations", - "type": "integer" + "exposeOnenoteFiles": { + "description": "Set to make OneNote files show up in directory listings.", + "type": "boolean", + "default": false }, - "length": { - "description": "Length is the length of the range in bytes.", - "type": "integer" + "hardDelete": { + "description": "Permanently delete files on removal.", + "type": "boolean", + "default": false }, - "offset": { - "description": "Offset is the offset of the range inside the file.", - "type": "integer" - } - } - }, - "model.Job": { - "type": "object", - "properties": { - 
"attachmentId": { - "type": "integer" + "hashType": { + "description": "Specify the hash in use for the backend.", + "type": "string", + "default": "auto", + "example": "auto" }, - "errorMessage": { + "linkPassword": { + "description": "Set the password for links created by the link command.", "type": "string" }, - "errorStackTrace": { + "linkScope": { + "description": "Set the scope of the links created by the link command.", + "type": "string", + "default": "anonymous", + "example": "anonymous" + }, + "linkType": { + "description": "Set the type of the links created by the link command.", + "type": "string", + "default": "view", + "example": "view" + }, + "listChunk": { + "description": "Size of listing chunk.", + "type": "integer", + "default": 1000 + }, + "metadataPermissions": { + "description": "Control whether permissions should be read or written in metadata.", + "type": "string", + "default": "off", + "example": "off" + }, + "noVersions": { + "description": "Remove all versions on modifying operations.", + "type": "boolean", + "default": false + }, + "region": { + "description": "Choose national cloud region for OneDrive.", + "type": "string", + "default": "global", + "example": "global" + }, + "rootFolderId": { + "description": "ID of the root folder.", "type": "string" }, - "id": { - "type": "integer" + "serverSideAcrossConfigs": { + "description": "Deprecated: use --server-side-across-configs instead.", + "type": "boolean", + "default": false }, - "state": { - "$ref": "#/definitions/model.JobState" + "tenant": { + "description": "ID of the service principal's tenant. 
Also called its directory ID.", + "type": "string" }, - "type": { - "$ref": "#/definitions/model.JobType" + "token": { + "description": "OAuth Access Token as a JSON blob.", + "type": "string" }, - "workerId": { - "description": "Associations - AttachmentID SET NULL for fast prep deletion, async cleanup", + "tokenUrl": { + "description": "Token server url.", "type": "string" + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "off" } } }, - "model.JobState": { - "type": "string", - "enum": [ - "created", - "ready", - "paused", - "processing", - "complete", - "error" - ], - "x-enum-varnames": [ - "Created", - "Ready", - "Paused", - "Processing", - "Complete", - "Error" - ] - }, - "model.JobType": { - "type": "string", - "enum": [ - "scan", - "pack", - "daggen" - ], - "x-enum-varnames": [ - "Scan", - "Pack", - "DagGen" - ] - }, - "model.Preparation": { + "storage.oosEnv_authConfig": { "type": "object", "properties": { - "createdAt": { + "attemptResumeUpload": { + "description": "If true attempt to resume previously started multipart upload for the object.", + "type": "boolean", + "default": false + }, + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" + }, + "compartment": { + "description": "Specify compartment OCID, if you need to list buckets.", "type": "string" }, - "deleteAfterExport": { - "description": "DeleteAfterExport is a flag that indicates whether the source files should be deleted after export.", - "type": "boolean" + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" }, - "id": { - "type": "integer" + "copyTimeout": { + "description": "Timeout for copy.", + "type": "string", + "default": "1m0s" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "disableChecksum": { + "description": "Don't store MD5 checksum with object 
metadata.", + "type": "boolean", + "default": false + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" + }, + "endpoint": { + "description": "Endpoint for Object storage API.", + "type": "string" + }, + "leavePartsOnError": { + "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", + "type": "boolean", + "default": false + }, + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "maxSize": { - "type": "integer" + "namespace": { + "description": "Object storage namespace", + "type": "string" }, - "minPieceSize": { - "description": "Minimum piece size for the preparation, applies only to DAG and remainder pieces", - "type": "integer" + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false }, - "name": { + "region": { + "description": "Object storage Region", "type": "string" }, - "noDag": { - "type": "boolean" + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "type": "string", + "example": "" }, - "noInline": { - "type": "boolean" + "sseCustomerKey": { + "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "type": "string", + "example": "" }, - "outputStorages": { - "type": "array", - "items": { - "$ref": "#/definitions/model.Storage" - } + "sseCustomerKeyFile": { + "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "type": "string", + "example": "" }, - "pieceSize": { - "type": "integer" + "sseCustomerKeySha256": { + "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the 
encryption", + "type": "string", + "example": "" }, - "sourceStorages": { - "type": "array", - "items": { - "$ref": "#/definitions/model.Storage" - } + "sseKmsKeyId": { + "description": "if using your own master key in vault, this header specifies the", + "type": "string", + "example": "" }, - "updatedAt": { - "type": "string" + "storageTier": { + "description": "The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "type": "string", + "default": "Standard", + "example": "Standard" }, - "walletId": { - "description": "Associations", - "type": "integer" + "uploadConcurrency": { + "description": "Concurrency for multipart uploads.", + "type": "integer", + "default": 10 + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" } } }, - "model.Schedule": { + "storage.oosInstance_principal_authConfig": { "type": "object", "properties": { - "allowedPieceCids": { - "type": "array", - "items": { - "type": "string" - } + "attemptResumeUpload": { + "description": "If true attempt to resume previously started multipart upload for the object.", + "type": "boolean", + "default": false }, - "announceToIpni": { - "type": "boolean" + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" }, - "createdAt": { + "compartment": { + "description": "Specify compartment OCID, if you need to list buckets.", "type": "string" }, - "dealType": { - "$ref": "#/definitions/model.DealType" + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" }, - "duration": { - "type": "integer" + "copyTimeout": { + "description": "Timeout for copy.", + "type": "string", + "default": "1m0s" }, - "errorMessage": { + "description": { + "description": "Description of the remote.", "type": "string" }, - "force": { - "type": 
"boolean" + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false }, - "httpHeaders": { - "$ref": "#/definitions/model.ConfigMap" + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" }, - "id": { - "type": "integer" + "endpoint": { + "description": "Endpoint for Object storage API.", + "type": "string" }, - "keepUnsealed": { - "type": "boolean" + "leavePartsOnError": { + "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", + "type": "boolean", + "default": false }, - "maxPendingDealNumber": { - "type": "integer" + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "maxPendingDealSize": { - "type": "integer" + "namespace": { + "description": "Object storage namespace", + "type": "string" }, - "notes": { + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false + }, + "region": { + "description": "Object storage Region", "type": "string" }, - "preparationId": { - "description": "Associations", - "type": "integer" + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "type": "string", + "example": "" }, - "pricePerDeal": { - "type": "number" + "sseCustomerKey": { + "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "type": "string", + "example": "" }, - "pricePerGb": { - "type": "number" + "sseCustomerKeyFile": { + "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "type": "string", + "example": "" }, - "pricePerGbEpoch": { - "type": "number" + "sseCustomerKeySha256": 
{ + "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", + "type": "string", + "example": "" }, - "provider": { + "sseKmsKeyId": { + "description": "if using your own master key in vault, this header specifies the", + "type": "string", + "example": "" + }, + "storageTier": { + "description": "The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "type": "string", + "default": "Standard", + "example": "Standard" + }, + "uploadConcurrency": { + "description": "Concurrency for multipart uploads.", + "type": "integer", + "default": 10 + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" + } + } + }, + "storage.oosNo_authConfig": { + "type": "object", + "properties": { + "attemptResumeUpload": { + "description": "If true attempt to resume previously started multipart upload for the object.", + "type": "boolean", + "default": false + }, + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" + }, + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" + }, + "copyTimeout": { + "description": "Timeout for copy.", + "type": "string", + "default": "1m0s" + }, + "description": { + "description": "Description of the remote.", "type": "string" }, - "scheduleCron": { + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" + }, + "endpoint": { + "description": "Endpoint for Object storage API.", "type": "string" }, - "scheduleCronPerpetual": { - "type": "boolean" + "leavePartsOnError": { + "description": "If true avoid calling 
abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", + "type": "boolean", + "default": false }, - "scheduleDealNumber": { - "type": "integer" + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 + }, + "namespace": { + "description": "Object storage namespace", + "type": "string" + }, + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false }, - "scheduleDealSize": { - "type": "integer" + "region": { + "description": "Object storage Region", + "type": "string" }, - "startDelay": { - "type": "integer" + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "type": "string", + "example": "" }, - "state": { - "$ref": "#/definitions/model.ScheduleState" + "sseCustomerKey": { + "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "type": "string", + "example": "" }, - "totalDealNumber": { - "type": "integer" + "sseCustomerKeyFile": { + "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "type": "string", + "example": "" }, - "totalDealSize": { - "type": "integer" + "sseCustomerKeySha256": { + "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", + "type": "string", + "example": "" }, - "updatedAt": { - "type": "string" + "sseKmsKeyId": { + "description": "if using your own master key in vault, this header specifies the", + "type": "string", + "example": "" }, - "urlTemplate": { - "type": "string" + "storageTier": { + "description": "The storage class to use when storing new objects in storage. 
https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "type": "string", + "default": "Standard", + "example": "Standard" }, - "verified": { - "type": "boolean" + "uploadConcurrency": { + "description": "Concurrency for multipart uploads.", + "type": "integer", + "default": 10 + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" } } }, - "model.ScheduleState": { - "type": "string", - "enum": [ - "active", - "paused", - "error", - "completed" - ], - "x-enum-varnames": [ - "ScheduleActive", - "SchedulePaused", - "ScheduleError", - "ScheduleCompleted" - ] - }, - "model.Storage": { + "storage.oosResource_principal_authConfig": { "type": "object", "properties": { - "clientConfig": { - "description": "ClientConfig is the HTTP configuration for the storage, if applicable.", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "attemptResumeUpload": { + "description": "If true attempt to resume previously started multipart upload for the object.", + "type": "boolean", + "default": false }, - "config": { - "description": "Config is a map of key-value pairs that can be used to store RClone options.", - "allOf": [ - { - "$ref": "#/definitions/model.ConfigMap" - } - ] + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" }, - "createdAt": { + "compartment": { + "description": "Specify compartment OCID, if you need to list buckets.", "type": "string" }, - "id": { - "type": "integer" + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" }, - "name": { - "type": "string" + "copyTimeout": { + "description": "Timeout for copy.", + "type": "string", + "default": "1m0s" }, - "path": { - "description": "Path is the path to the storage root.", + "description": { + "description": "Description of the remote.", "type": "string" }, - 
"preparationsAsOutput": { - "type": "array", - "items": { - "$ref": "#/definitions/model.Preparation" - } + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false }, - "preparationsAsSource": { - "description": "Associations", - "type": "array", - "items": { - "$ref": "#/definitions/model.Preparation" - } + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" }, - "type": { + "endpoint": { + "description": "Endpoint for Object storage API.", "type": "string" }, - "updatedAt": { - "type": "string" - } - } - }, - "model.Wallet": { - "type": "object", - "properties": { - "actorId": { - "description": "nullable, links to on-chain actor f0...", - "type": "string" + "leavePartsOnError": { + "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", + "type": "boolean", + "default": false }, - "address": { - "description": "filecoin address (f1.../f3...)", + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 + }, + "namespace": { + "description": "Object storage namespace", "type": "string" }, - "id": { - "type": "integer" + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false }, - "keyPath": { - "description": "absolute path to key file", + "region": { + "description": "Object storage Region", "type": "string" }, - "keyStore": { - "description": "local, yubikey, aws-kms, etc", - "type": "string" + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "type": "string", + "example": "" }, - "name": { - "description": "optional label", - "type": "string" + "sseCustomerKey": { + "description": "To use SSE-C, the optional 
header that specifies the base64-encoded 256-bit encryption key to use to", + "type": "string", + "example": "" + }, + "sseCustomerKeyFile": { + "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "type": "string", + "example": "" + }, + "sseCustomerKeySha256": { + "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", + "type": "string", + "example": "" + }, + "sseKmsKeyId": { + "description": "if using your own master key in vault, this header specifies the", + "type": "string", + "example": "" + }, + "storageTier": { + "description": "The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "type": "string", + "default": "Standard", + "example": "Standard" + }, + "uploadConcurrency": { + "description": "Concurrency for multipart uploads.", + "type": "integer", + "default": 10 + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" } } }, - "schedule.CreateRequest": { + "storage.oosUser_principal_authConfig": { "type": "object", "properties": { - "allowedPieceCids": { - "description": "Allowed piece CIDs in this schedule", - "type": "array", - "items": { - "type": "string" - } - }, - "dealType": { - "description": "Deal type: market (f05) or pdp (f41)", - "type": "string" + "attemptResumeUpload": { + "description": "If true attempt to resume previously started multipart upload for the object.", + "type": "boolean", + "default": false }, - "duration": { - "description": "Duration in epoch or in duration format, i.e. 
1500000, 2400h", + "chunkSize": { + "description": "Chunk size to use for uploading.", "type": "string", - "default": "12840h" + "default": "5Mi" }, - "force": { - "description": "Force to send out deals regardless of replication restriction", - "type": "boolean" + "compartment": { + "description": "Specify compartment OCID, if you need to list buckets.", + "type": "string" }, - "httpHeaders": { - "description": "http headers to be passed with the request (i.e. key=value)", - "type": "array", - "items": { - "type": "string" - } + "configFile": { + "description": "Path to OCI config file", + "type": "string", + "default": "~/.oci/config", + "example": "~/.oci/config" }, - "ipni": { - "description": "Whether the deal should be IPNI", - "type": "boolean", - "default": true + "configProfile": { + "description": "Profile name inside the oci config file", + "type": "string", + "default": "Default", + "example": "Default" }, - "keepUnsealed": { - "description": "Whether the deal should be kept unsealed", - "type": "boolean", - "default": true + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" }, - "maxPendingDealNumber": { - "description": "Max pending deal number", - "type": "integer" + "copyTimeout": { + "description": "Timeout for copy.", + "type": "string", + "default": "1m0s" }, - "maxPendingDealSize": { - "description": "Max pending deal size in human readable format, i.e. 
100 TiB", + "description": { + "description": "Description of the remote.", "type": "string" }, - "notes": { - "description": "Notes", - "type": "string" + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false }, - "preparation": { - "description": "Preparation ID or name", - "type": "string" + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" }, - "pricePerDeal": { - "description": "Price in FIL per deal", - "type": "number", - "default": 0 + "endpoint": { + "description": "Endpoint for Object storage API.", + "type": "string" }, - "pricePerGb": { - "description": "Price in FIL per GiB", - "type": "number", - "default": 0 + "leavePartsOnError": { + "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", + "type": "boolean", + "default": false }, - "pricePerGbEpoch": { - "description": "Price in FIL per GiB per epoch", - "type": "number", - "default": 0 + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "provider": { - "description": "Provider", + "namespace": { + "description": "Object storage namespace", "type": "string" }, - "scheduleCron": { - "description": "Schedule cron pattern", + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false + }, + "region": { + "description": "Object storage Region", "type": "string" }, - "scheduleCronPerpetual": { - "description": "Whether a cron schedule should run in definitely", - "type": "boolean" + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "type": "string", + "example": "" }, - "scheduleDealNumber": { - "description": "Number of deals per scheduled 
time", - "type": "integer" + "sseCustomerKey": { + "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "type": "string", + "example": "" }, - "scheduleDealSize": { - "description": "Size of deals per schedule trigger in human readable format, i.e. 100 TiB", - "type": "string" + "sseCustomerKeyFile": { + "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "type": "string", + "example": "" }, - "startDelay": { - "description": "Deal start delay in epoch or in duration format, i.e. 1000, 72h", + "sseCustomerKeySha256": { + "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", "type": "string", - "default": "72h" + "example": "" }, - "totalDealNumber": { - "description": "Total number of deals", - "type": "integer" + "sseKmsKeyId": { + "description": "if using your own master key in vault, this header specifies the", + "type": "string", + "example": "" }, - "totalDealSize": { - "description": "Total size of deals in human readable format, i.e. 100 TiB", - "type": "string" + "storageTier": { + "description": "The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "type": "string", + "default": "Standard", + "example": "Standard" }, - "urlTemplate": { - "description": "URL template with PIECE_CID placeholder for boost to fetch the CAR file, i.e. 
http://127.0.0.1/piece/{PIECE_CID}.car", - "type": "string" + "uploadConcurrency": { + "description": "Concurrency for multipart uploads.", + "type": "integer", + "default": 10 }, - "verified": { - "description": "Whether the deal should be verified", - "type": "boolean", - "default": true + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" } } }, - "schedule.UpdateRequest": { + "storage.oosWorkload_identity_authConfig": { "type": "object", "properties": { - "allowedPieceCids": { - "description": "Allowed piece CIDs in this schedule", - "type": "array", - "items": { - "type": "string" - } + "attemptResumeUpload": { + "description": "If true attempt to resume previously started multipart upload for the object.", + "type": "boolean", + "default": false }, - "dealType": { - "description": "Deal type: market (f05) or pdp (f41)", + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" + }, + "compartment": { + "description": "Specify compartment OCID, if you need to list buckets.", "type": "string" }, - "duration": { - "description": "Duration in epoch or in duration format, i.e. 1500000, 2400h", + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", "type": "string", - "default": "12840h" + "default": "4.656Gi" }, - "force": { - "description": "Force to send out deals regardless of replication restriction", - "type": "boolean" + "copyTimeout": { + "description": "Timeout for copy.", + "type": "string", + "default": "1m0s" }, - "httpHeaders": { - "description": "http headers to be passed with the request (i.e. 
key=value)", - "type": "array", - "items": { - "type": "string" - } + "description": { + "description": "Description of the remote.", + "type": "string" }, - "ipni": { - "description": "Whether the deal should be IPNI", + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", - "default": true + "default": false }, - "keepUnsealed": { - "description": "Whether the deal should be kept unsealed", + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" + }, + "endpoint": { + "description": "Endpoint for Object storage API.", + "type": "string" + }, + "leavePartsOnError": { + "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", "type": "boolean", - "default": true + "default": false }, - "maxPendingDealNumber": { - "description": "Max pending deal number", - "type": "integer" + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "maxPendingDealSize": { - "description": "Max pending deal size in human readable format, i.e. 
100 TiB", + "namespace": { + "description": "Object storage namespace", "type": "string" }, - "notes": { - "description": "Notes", + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false + }, + "region": { + "description": "Object storage Region", "type": "string" }, - "pricePerDeal": { - "description": "Price in FIL per deal", - "type": "number", - "default": 0 + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "type": "string", + "example": "" }, - "pricePerGb": { - "description": "Price in FIL per GiB", - "type": "number", - "default": 0 + "sseCustomerKey": { + "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "type": "string", + "example": "" }, - "pricePerGbEpoch": { - "description": "Price in FIL per GiB per epoch", - "type": "number", - "default": 0 + "sseCustomerKeyFile": { + "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "type": "string", + "example": "" }, - "scheduleCron": { - "description": "Schedule cron pattern", - "type": "string" + "sseCustomerKeySha256": { + "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", + "type": "string", + "example": "" + }, + "sseKmsKeyId": { + "description": "if using your own master key in vault, this header specifies the", + "type": "string", + "example": "" + }, + "storageTier": { + "description": "The storage class to use when storing new objects in storage. 
https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "type": "string", + "default": "Standard", + "example": "Standard" + }, + "uploadConcurrency": { + "description": "Concurrency for multipart uploads.", + "type": "integer", + "default": 10 }, - "scheduleCronPerpetual": { - "description": "Whether a cron schedule should run in definitely", - "type": "boolean" + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" + } + } + }, + "storage.opendriveConfig": { + "type": "object", + "properties": { + "access": { + "description": "Files and folders will be uploaded with this access permission (default private)", + "type": "string", + "default": "private", + "example": "private" }, - "scheduleDealNumber": { - "description": "Number of deals per scheduled time", - "type": "integer" + "chunkSize": { + "description": "Files will be uploaded in chunks this size.", + "type": "string", + "default": "10Mi" }, - "scheduleDealSize": { - "description": "Size of deals per schedule trigger in human readable format, i.e. 100 TiB", + "description": { + "description": "Description of the remote.", "type": "string" }, - "startDelay": { - "description": "Deal start delay in epoch or in duration format, i.e. 1000, 72h", + "encoding": { + "description": "The encoding for the backend.", "type": "string", - "default": "72h" - }, - "totalDealNumber": { - "description": "Total number of deals", - "type": "integer" + "default": "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot" }, - "totalDealSize": { - "description": "Total size of deals in human readable format, i.e. 100 TiB", + "password": { + "description": "Password.", "type": "string" }, - "urlTemplate": { - "description": "URL template with PIECE_CID placeholder for boost to fetch the CAR file, i.e. 
http://127.0.0.1/piece/{PIECE_CID}.car", + "username": { + "description": "Username.", "type": "string" - }, - "verified": { - "description": "Whether the deal should be verified", - "type": "boolean", - "default": true } } }, - "storage.DirEntry": { + "storage.pcloudConfig": { "type": "object", "properties": { - "dirId": { + "authUrl": { + "description": "Auth server URL.", "type": "string" }, - "hash": { + "clientCredentials": { + "description": "Use client credentials OAuth flow.", + "type": "boolean", + "default": false + }, + "clientId": { + "description": "OAuth Client Id.", "type": "string" }, - "isDir": { - "type": "boolean" + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" }, - "lastModified": { + "description": { + "description": "Description of the remote.", "type": "string" }, - "numItems": { - "type": "integer" + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" }, - "path": { + "hostname": { + "description": "Hostname to connect to.", + "type": "string", + "default": "api.pcloud.com", + "example": "api.pcloud.com" + }, + "password": { + "description": "Your pcloud password.", "type": "string" }, - "size": { - "type": "integer" - } - } - }, - "storage.RenameRequest": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { + "rootFolderId": { + "description": "Fill in for rclone to use a non root folder as its starting point.", + "type": "string", + "default": "d0" + }, + "token": { + "description": "OAuth Access Token as a JSON blob.", + "type": "string" + }, + "tokenUrl": { + "description": "Token server url.", + "type": "string" + }, + "username": { + "description": "Your pcloud username.", "type": "string" } } }, - "storage.azureblobConfig": { + "storage.premiumizemeConfig": { "type": "object", "properties": { - "accessTier": { - "description": "Access tier of blob: hot, cool, cold or archive.", + "apiKey": 
{ + "description": "API Key.", "type": "string" }, - "account": { - "description": "Azure Storage Account Name.", + "authUrl": { + "description": "Auth server URL.", "type": "string" }, - "archiveTierDelete": { - "description": "Delete archive tier blobs before overwriting.", + "clientCredentials": { + "description": "Use client credentials OAuth flow.", "type": "boolean", "default": false }, - "chunkSize": { - "description": "Upload chunk size.", - "type": "string", - "default": "4Mi" - }, - "clientCertificatePassword": { - "description": "Password for the certificate file (optional).", - "type": "string" - }, - "clientCertificatePath": { - "description": "Path to a PEM or PKCS12 certificate file including the private key.", - "type": "string" - }, "clientId": { - "description": "The ID of the client in use.", + "description": "OAuth Client Id.", "type": "string" }, "clientSecret": { - "description": "One of the service principal's client secrets", + "description": "OAuth Client Secret.", "type": "string" }, - "clientSendCertificateChain": { - "description": "Send the certificate chain when using certificate auth.", - "type": "boolean", - "default": false - }, - "deleteSnapshots": { - "description": "Set to specify how to deal with snapshots on blob deletion.", - "type": "string", - "example": "" - }, "description": { "description": "Description of the remote.", "type": "string" }, - "directoryMarkers": { - "description": "Upload an empty object with a trailing slash when a new directory is created", - "type": "boolean", - "default": false - }, - "disableChecksum": { - "description": "Don't store MD5 checksum with object metadata.", - "type": "boolean", - "default": false - }, "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": "Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8" + "default": "Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot" }, - "endpoint": { - "description": "Endpoint for the service.", + "token": { + 
"description": "OAuth Access Token as a JSON blob.", "type": "string" }, - "envAuth": { - "description": "Read credentials from runtime (environment variables, CLI or MSI).", - "type": "boolean", - "default": false - }, - "key": { - "description": "Storage Account Shared Key.", + "tokenUrl": { + "description": "Token server url.", + "type": "string" + } + } + }, + "storage.putioConfig": { + "type": "object", + "properties": { + "authUrl": { + "description": "Auth server URL.", "type": "string" }, - "listChunk": { - "description": "Size of blob list.", - "type": "integer", - "default": 5000 - }, - "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed. (no longer used)", - "type": "string", - "default": "1m0s" - }, - "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", + "clientCredentials": { + "description": "Use client credentials OAuth flow.", "type": "boolean", "default": false }, - "msiClientId": { - "description": "Object ID of the user-assigned MSI to use, if any.", + "clientId": { + "description": "OAuth Client Id.", "type": "string" }, - "msiMiResId": { - "description": "Azure resource ID of the user-assigned MSI to use, if any.", + "clientSecret": { + "description": "OAuth Client Secret.", "type": "string" }, - "msiObjectId": { - "description": "Object ID of the user-assigned MSI to use, if any.", + "description": { + "description": "Description of the remote.", "type": "string" }, - "noCheckContainer": { - "description": "If set, don't attempt to check the container exists or create it.", - "type": "boolean", - "default": false + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" }, - "noHeadObject": { - "description": "If set, do not do HEAD before GET when getting objects.", - "type": "boolean", - "default": false + "token": { + "description": "OAuth Access Token 
as a JSON blob.", + "type": "string" }, - "password": { - "description": "The user's password", + "tokenUrl": { + "description": "Token server url.", + "type": "string" + } + } + }, + "storage.qingstorConfig": { + "type": "object", + "properties": { + "accessKeyId": { + "description": "QingStor Access Key ID.", "type": "string" }, - "publicAccess": { - "description": "Public access level of a container: blob or container.", + "chunkSize": { + "description": "Chunk size to use for uploading.", "type": "string", - "example": "" + "default": "4Mi" }, - "sasUrl": { - "description": "SAS URL for container level access only.", + "connectionRetries": { + "description": "Number of connection retries.", + "type": "integer", + "default": 3 + }, + "description": { + "description": "Description of the remote.", "type": "string" }, - "servicePrincipalFile": { - "description": "Path to file containing credentials for use with a service principal.", + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,Ctl,InvalidUtf8" + }, + "endpoint": { + "description": "Enter an endpoint URL to connection QingStor API.", "type": "string" }, - "tenant": { - "description": "ID of the service principal's tenant. 
Also called its directory ID.", + "envAuth": { + "description": "Get QingStor credentials from runtime.", + "type": "boolean", + "default": false, + "example": false + }, + "secretAccessKey": { + "description": "QingStor Secret Access Key (password).", "type": "string" }, "uploadConcurrency": { "description": "Concurrency for multipart uploads.", "type": "integer", - "default": 16 + "default": 1 }, "uploadCutoff": { - "description": "Cutoff for switching to chunked upload (\u003c= 256 MiB) (deprecated).", - "type": "string" - }, - "useEmulator": { - "description": "Uses local storage emulator if provided as 'true'.", - "type": "boolean", - "default": false - }, - "useMsi": { - "description": "Use a managed service identity to authenticate (only works in Azure).", - "type": "boolean", - "default": false + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" }, - "username": { - "description": "User name (usually an email address)", - "type": "string" + "zone": { + "description": "Zone to connect to.", + "type": "string", + "example": "pek3a" } } }, - "storage.b2Config": { + "storage.s3AWSConfig": { "type": "object", "properties": { - "account": { - "description": "Account ID or Application Key ID.", + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" + }, + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", "type": "string" }, + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", + "type": "string", + "example": "private" + }, "chunkSize": { - "description": "Upload chunk size.", + "description": "Chunk size to use for uploading.", "type": "string", - "default": "96Mi" + "default": "5Mi" }, "copyCutoff": { "description": "Cutoff for switching to multipart copy.", "type": "string", - "default": "4Gi" + "default": "4.656Gi" + }, + "decompress": { + "description": "If set this will decompress gzip encoded objects.", + "type": 
"boolean", + "default": false }, "description": { "description": "Description of the remote.", "type": "string" }, + "directoryBucket": { + "description": "Set to use AWS Directory Buckets", + "type": "boolean", + "default": false + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { - "description": "Disable checksums for large (\u003e upload cutoff) files.", + "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", "default": false }, - "downloadAuthDuration": { - "description": "Time before the public link authorization token will expire in s or suffix ms|s|m|h|d.", - "type": "string", - "default": "1w" + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", + "type": "boolean", + "default": false }, "downloadUrl": { "description": "Custom endpoint for downloads.", @@ -7423,26 +13483,53 @@ "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" + "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for the service.", + "description": "Endpoint for S3 API.", "type": "string" }, - "hardDelete": { - "description": "Permanently delete files on remote removal, otherwise hide files.", + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false + }, + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true + }, + "leavePartsOnError": { + "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.", "type": "boolean", "default": false }, - "key": { - "description": "Application Key.", - "type": 
"string" + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 }, - "lifecycle": { - "description": "Set the number of days deleted files should be kept when creating a bucket.", + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", + "type": "string", + "default": "unset" + }, + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", "type": "integer", "default": 0 }, + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", + "type": "string", + "example": "" + }, + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 + }, "memoryPoolFlushTime": { "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", @@ -7453,2549 +13540,1584 @@ "type": "boolean", "default": false }, - "testMode": { - "description": "A flag string for X-Bz-Test-Mode header for debugging.", - "type": "string" - }, - "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", - "type": "integer", - "default": 4 - }, - "uploadCutoff": { - "description": "Cutoff for switching to chunked upload.", + "mightGzip": { + "description": "Set this if the backend might gzip objects.", "type": "string", - "default": "200Mi" + "default": "unset" }, - "versionAt": { - "description": "Show file versions as they were at the specified time.", - "type": "string", - "default": "off" + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false }, - "versions": { - "description": "Include old versions in directory listings.", + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", "type": "boolean", "default": false - } - } - }, - "storage.boxConfig": { - 
"type": "object", - "properties": { - "accessToken": { - "description": "Box App Primary Access Token", - "type": "string" }, - "authUrl": { - "description": "Auth server URL.", - "type": "string" + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false }, - "boxConfigFile": { - "description": "Box App config.json location", + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false + }, + "profile": { + "description": "Profile to use in the shared credentials file.", "type": "string" }, - "boxSubType": { + "region": { + "description": "Region to connect to.", "type": "string", - "default": "user", - "example": "user" + "example": "us-east-1" }, - "clientId": { - "description": "OAuth Client Id.", - "type": "string" + "requesterPays": { + "description": "Enables requester pays option when interacting with S3 bucket.", + "type": "boolean", + "default": false }, - "clientSecret": { - "description": "OAuth Client Secret.", + "roleArn": { + "description": "ARN of the IAM role to assume.", "type": "string" }, - "commitRetries": { - "description": "Max number of times to try committing a multipart file.", - "type": "integer", - "default": 100 - }, - "description": { - "description": "Description of the remote.", + "roleExternalId": { + "description": "External ID for assumed role.", "type": "string" }, - "encoding": { - "description": "The encoding for the backend.", - "type": "string", - "default": "Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot" - }, - "impersonate": { - "description": "Impersonate this user ID when using a service account.", + "roleSessionDuration": { + "description": "Session duration for assumed role.", "type": "string" }, - "listChunk": { - "description": "Size of listing chunk 1-1000.", - "type": "integer", - "default": 1000 - }, - "ownedBy": { - "description": "Only show items owned by the 
login (email address) passed in.", + "roleSessionName": { + "description": "Session name for assumed role.", "type": "string" }, - "rootFolderId": { - "description": "Fill in for rclone to use a non root folder as its starting point.", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "default": "0" + "default": "Off" }, - "token": { - "description": "OAuth Access Token as a JSON blob.", + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" }, - "tokenUrl": { - "description": "Token server url.", + "serverSideEncryption": { + "description": "The server-side encryption algorithm used when storing this object in S3.", "type": "string" }, - "uploadCutoff": { - "description": "Cutoff for switching to multipart upload (\u003e= 50 MiB).", - "type": "string", - "default": "50Mi" - } - } - }, - "storage.createAzureblobStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] - }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.azureblobConfig" - } - ] - }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "sessionToken": { + "description": "An AWS session token.", + "type": "string" }, - "path": { - "description": "Path of the storage", + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", "type": "string" - } - } - }, - "storage.createB2StorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.b2Config" - } - ] + "signAcceptEncoding": { + "description": "Set if 
rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" }, - "name": { - "description": "Name of the storage, must be unique", + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", "type": "string", - "example": "my-storage" + "example": "" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createBoxStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "sseCustomerKey": { + "description": "To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.", + "type": "string", + "example": "" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.boxConfig" - } - ] + "sseCustomerKeyBase64": { + "description": "If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.", + "type": "string", + "example": "" }, - "name": { - "description": "Name of the storage, must be unique", + "sseCustomerKeyMd5": { + "description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", "type": "string", - "example": "my-storage" + "example": "" }, - "path": { - "description": "Path of the storage", + "sseKmsKeyId": { + "description": "If using KMS ID you must provide the ARN of Key.", + "type": "string", + "example": "" + }, + "storageClass": { + "description": "The storage class to use when storing new objects in S3.", + "type": "string", + "example": "REDUCED_REDUNDANCY" + }, + "stsEndpoint": { + "description": "Endpoint for STS (deprecated).", "type": "string" - } - } - }, - "storage.createDriveStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config 
for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.driveConfig" - } - ] + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", + "type": "integer", + "default": 4 }, - "name": { - "description": "Name of the storage, must be unique", + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", "type": "string", - "example": "my-storage" + "default": "200Mi" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createDropboxStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "useAccelerateEndpoint": { + "description": "If true use the AWS S3 accelerated endpoint.", + "type": "boolean", + "default": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.dropboxConfig" - } - ] + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" }, - "name": { - "description": "Name of the storage, must be unique", + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createFichierStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false }, - 
"config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.fichierConfig" - } - ] + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" }, - "name": { - "description": "Name of the storage, must be unique", + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createFilefabricStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.filefabricConfig" - } - ] + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createFtpStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "useXId": { + "description": "Set if rclone should add x-id URL 
parameters.", + "type": "string", + "default": "unset" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.ftpConfig" - } - ] + "v2Auth": { + "description": "If true use v2 authentication.", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", + "versionAt": { + "description": "Show file versions as they were at the specified time.", "type": "string", - "example": "my-storage" + "default": "off" }, - "path": { - "description": "Path of the storage", - "type": "string" + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false } } }, - "storage.createGcsStorageRequest": { + "storage.s3AlibabaConfig": { "type": "object", "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.gcsConfig" - } - ] + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", "type": "string", - "example": "my-storage" + "example": "private" }, - "path": { - "description": "Path of the storage", + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" + }, + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" + }, + "decompress": { + "description": "If set this will decompress 
gzip encoded objects.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", "type": "string" - } - } - }, - "storage.createGphotosStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.gphotosConfig" - } - ] + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false }, - "path": { - "description": "Path of the storage", + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", + "type": "boolean", + "default": false + }, + "downloadUrl": { + "description": "Custom endpoint for downloads.", "type": "string" - } - } - }, - "storage.createHdfsStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.hdfsConfig" - } - ] + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" }, - "name": { - "description": "Name of the storage, must be unique", + "endpoint": { + "description": "Endpoint for S3 API.", "type": "string", - "example": "my-storage" + "example": "oss-accelerate.aliyuncs.com" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - 
"storage.createHidriveStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.hidriveConfig" - } - ] + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true }, - "name": { - "description": "Name of the storage, must be unique", + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 + }, + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createHttpStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.httpConfig" - } - ] + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "name": { - "description": "Name of the storage, must be unique", + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. 
(no longer used)", "type": "string", - "example": "my-storage" + "default": "1m0s" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createInternetarchiveStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", + "type": "boolean", + "default": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.internetarchiveConfig" - } - ] + "mightGzip": { + "description": "Set this if the backend might gzip objects.", + "type": "string", + "default": "unset" + }, + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false + }, + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false }, - "path": { - "description": "Path of the storage", + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false + }, + "profile": { + "description": "Profile to use in the shared credentials file.", "type": "string" - } - } - }, - "storage.createJottacloudStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": 
"#/definitions/storage.jottacloudConfig" - } - ] + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" }, - "path": { - "description": "Path of the storage", + "roleSessionDuration": { + "description": "Session duration for assumed role.", "type": "string" - } - } - }, - "storage.createKoofrDigistorageStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.koofrDigistorageConfig" - } - ] + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "example": "my-storage" + "default": "Off" }, - "path": { - "description": "Path of the storage", + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" - } - } - }, - "storage.createKoofrKoofrStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.koofrKoofrConfig" - } - ] + "sessionToken": { + "description": "An AWS session token.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", + "type": "string" + }, + "signAcceptEncoding": { + 
"description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", + "storageClass": { + "description": "The storage class to use when storing new objects in S3.", "type": "string" - } - } - }, - "storage.createKoofrOtherStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.koofrOtherConfig" - } - ] + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", + "type": "integer", + "default": 4 }, - "name": { - "description": "Name of the storage, must be unique", + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", "type": "string", - "example": "my-storage" + "default": "200Mi" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createLocalStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.localConfig" - } - ] + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" }, - "name": { - "description": "Name of the storage, must be unique", + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + 
"useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createMailruStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.mailruConfig" - } - ] + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", + "type": "string", + "default": "unset" }, - "name": { - "description": "Name of the storage, must be unique", + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createMegaStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", + "type": "boolean", + "default": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.megaConfig" - } - ] + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, + "v2Auth": { + 
"description": "If true use v2 authentication.", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", + "versionAt": { + "description": "Show file versions as they were at the specified time.", "type": "string", - "example": "my-storage" + "default": "off" }, - "path": { - "description": "Path of the storage", - "type": "string" + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false } } }, - "storage.createNetstorageStorageRequest": { + "storage.s3ArvanCloudConfig": { "type": "object", "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.netstorageConfig" - } - ] + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", "type": "string", - "example": "my-storage" + "example": "private" }, - "path": { - "description": "Path of the storage", + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" + }, + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" + }, + "decompress": { + "description": "If set this will decompress gzip encoded objects.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", "type": "string" - } - } - }, - 
"storage.createOnedriveStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.onedriveConfig" - } - ] + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false }, - "path": { - "description": "Path of the storage", + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", + "type": "boolean", + "default": false + }, + "downloadUrl": { + "description": "Custom endpoint for downloads.", "type": "string" - } - } - }, - "storage.createOosEnv_authStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.oosEnv_authConfig" - } - ] + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" }, - "name": { - "description": "Name of the storage, must be unique", + "endpoint": { + "description": "Endpoint for S3 API.", "type": "string", - "example": "my-storage" + "example": "s3.ir-thr-at1.arvanstorage.ir" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createOosInstance_principal_authStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying 
HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.oosInstance_principal_authConfig" - } - ] + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true }, - "name": { - "description": "Name of the storage, must be unique", + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 + }, + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createOosNo_authStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.oosNo_authConfig" - } - ] + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", + "type": "string", + "example": "ir-thr-at1" }, - "name": { - "description": "Name of the storage, must be unique", + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 + }, + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. 
(no longer used)", "type": "string", - "example": "my-storage" + "default": "1m0s" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createOosResource_principal_authStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", + "type": "boolean", + "default": false + }, + "mightGzip": { + "description": "Set this if the backend might gzip objects.", + "type": "string", + "default": "unset" + }, + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false + }, + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", + "type": "boolean", + "default": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.oosResource_principal_authConfig" - } - ] + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false }, - "path": { - "description": "Path of the storage", + "profile": { + "description": "Profile to use in the shared credentials file.", "type": "string" - } - } - }, - "storage.createOosUser_principal_authStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - 
{ - "$ref": "#/definitions/storage.oosUser_principal_authConfig" - } - ] + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" }, - "path": { - "description": "Path of the storage", + "roleSessionDuration": { + "description": "Session duration for assumed role.", "type": "string" - } - } - }, - "storage.createOosWorkload_identity_authStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.oosWorkload_identity_authConfig" - } - ] + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "example": "my-storage" + "default": "Off" }, - "path": { - "description": "Path of the storage", + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" - } - } - }, - "storage.createOpendriveStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.opendriveConfig" - } - ] + "sessionToken": { + "description": "An AWS session token.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", + "type": "string" + }, 
+ "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", + "storageClass": { + "description": "The storage class to use when storing new objects in S3.", "type": "string" - } - } - }, - "storage.createPcloudStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.pcloudConfig" - } - ] + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", + "type": "integer", + "default": 4 }, - "name": { - "description": "Name of the storage, must be unique", + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", "type": "string", - "example": "my-storage" - }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createPremiumizemeStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "default": "200Mi" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.premiumizemeConfig" - } - ] + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" }, - "name": { - "description": "Name of the storage, must be unique", + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, 
- "storage.createPutioStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.putioConfig" - } - ] + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" }, - "name": { - "description": "Name of the storage, must be unique", + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createQingstorStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.qingstorConfig" - } - ] + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - 
"description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3AWSStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3AWSConfig" - } - ] + "v2Auth": { + "description": "If true use v2 authentication.", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", + "versionAt": { + "description": "Show file versions as they were at the specified time.", "type": "string", - "example": "my-storage" + "default": "off" }, - "path": { - "description": "Path of the storage", - "type": "string" + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false } } }, - "storage.createS3AlibabaStorageRequest": { + "storage.s3BizflyCloudConfig": { "type": "object", "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3AlibabaConfig" - } - ] + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", "type": "string", - "example": "my-storage" - }, - 
"path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3ArvanCloudStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "example": "private" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3ArvanCloudConfig" - } - ] + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" }, - "name": { - "description": "Name of the storage, must be unique", + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", "type": "string", - "example": "my-storage" + "default": "4.656Gi" }, - "path": { - "description": "Path of the storage", + "decompress": { + "description": "If set this will decompress gzip encoded objects.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", "type": "string" - } - } - }, - "storage.createS3CephStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3CephConfig" - } - ] + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false }, - "path": { - "description": "Path of the storage", + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", + "type": 
"boolean", + "default": false + }, + "downloadUrl": { + "description": "Custom endpoint for downloads.", "type": "string" - } - } - }, - "storage.createS3ChinaMobileStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3ChinaMobileConfig" - } - ] + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" }, - "name": { - "description": "Name of the storage, must be unique", + "endpoint": { + "description": "Endpoint for S3 API.", "type": "string", - "example": "my-storage" + "example": "hn.ss.bfcplatform.vn" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3CloudflareStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3CloudflareConfig" - } - ] + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true }, - "name": { - "description": "Name of the storage, must be unique", + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 + }, + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", "type": "string", - "example": "my-storage" + "default": 
"unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3DigitalOceanStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 + }, + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3DigitalOceanConfig" - } - ] + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", + "type": "string", + "default": "1m0s" }, - "name": { - "description": "Name of the storage, must be unique", + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", + "type": "boolean", + "default": false + }, + "mightGzip": { + "description": "Set this if the backend might gzip objects.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3DreamhostStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3DreamhostConfig" - } - ] + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3GCSStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3GCSConfig" - } - ] + "profile": { + "description": "Profile to use in the shared credentials file.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", + "region": { + "description": "Region to connect to.", "type": 
"string", - "example": "my-storage" + "example": "hn" }, - "path": { - "description": "Path of the storage", + "roleArn": { + "description": "ARN of the IAM role to assume.", "type": "string" - } - } - }, - "storage.createS3HuaweiOBSStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] - }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3HuaweiOBSConfig" - } - ] }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" }, - "path": { - "description": "Path of the storage", + "roleSessionDuration": { + "description": "Session duration for assumed role.", "type": "string" - } - } - }, - "storage.createS3IBMCOSStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3IBMCOSConfig" - } - ] + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "example": "my-storage" + "default": "Off" }, - "path": { - "description": "Path of the storage", + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" - } - } - }, - "storage.createS3IDriveStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { 
- "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3IDriveConfig" - } - ] + "sessionToken": { + "description": "An AWS session token.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", + "type": "string" + }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3IONOSStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", + "type": "integer", + "default": 4 }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3IONOSConfig" - } - ] + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" }, - "name": { - "description": "Name of the storage, must be unique", + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3LeviiaStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" }, - "config": { - "description": 
"config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3LeviiaConfig" - } - ] + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", "type": "string", - "example": "my-storage" - }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3LiaraStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "default": "unset" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3LiaraConfig" - } - ] + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3LinodeStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3LinodeConfig" - } - ] + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", + "type": 
"boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3LyveCloudStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3LyveCloudConfig" - } - ] + "v2Auth": { + "description": "If true use v2 authentication.", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", + "versionAt": { + "description": "Show file versions as they were at the specified time.", "type": "string", - "example": "my-storage" + "default": "off" }, - "path": { - "description": "Path of the storage", - "type": "string" + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false } } }, - "storage.createS3MagaluStorageRequest": { + "storage.s3CephConfig": { "type": "object", "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3MagaluConfig" - } - ] + "acl": { + "description": "Canned 
ACL used when creating buckets and storing or copying objects.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", "type": "string", - "example": "my-storage" + "example": "private" }, - "path": { - "description": "Path of the storage", + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" + }, + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" + }, + "decompress": { + "description": "If set this will decompress gzip encoded objects.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", "type": "string" - } - } - }, - "storage.createS3MinioStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3MinioConfig" - } - ] + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false + }, + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", + "type": "boolean", + "default": false + }, + "downloadUrl": { + "description": "Custom endpoint for downloads.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", "type": "string", - "example": "my-storage" + "default": "Slash,InvalidUtf8,Dot" }, - "path": { - "description": "Path of the storage", + "endpoint": { + 
"description": "Endpoint for S3 API.", "type": "string" - } - } - }, - "storage.createS3NeteaseStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3NeteaseConfig" - } - ] + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false }, - "name": { - "description": "Name of the storage, must be unique", + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true + }, + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 + }, + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 + }, + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", "type": "string" - } - } - }, - "storage.createS3OtherStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3OtherConfig" - } - ] + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "name": { - "description": "Name of the 
storage, must be unique", + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", - "example": "my-storage" - }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3PetaboxStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "default": "1m0s" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3PetaboxConfig" - } - ] + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", + "mightGzip": { + "description": "Set this if the backend might gzip objects.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false + }, + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", + "type": "boolean", + "default": false + }, + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false + }, + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false + }, + "profile": { + "description": "Profile to use in the shared credentials file.", "type": "string" - } - } - }, - "storage.createS3QiniuStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - 
"config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3QiniuConfig" - } - ] + "region": { + "description": "Region to connect to.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" }, - "path": { - "description": "Path of the storage", + "roleExternalId": { + "description": "External ID for assumed role.", "type": "string" - } - } - }, - "storage.createS3RackCorpStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3RackCorpConfig" - } - ] + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "example": "my-storage" + "default": "Off" }, - "path": { - "description": "Path of the storage", + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" - } - } - }, - "storage.createS3RcloneStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3RcloneConfig" - } - ] + "serverSideEncryption": { + "description": "The server-side encryption algorithm used when storing this object in S3.", + "type": "string" }, - "name": { - 
"description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "sessionToken": { + "description": "An AWS session token.", + "type": "string" }, - "path": { - "description": "Path of the storage", + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", "type": "string" - } - } - }, - "storage.createS3ScalewayStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3ScalewayConfig" - } - ] + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" }, - "name": { - "description": "Name of the storage, must be unique", + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", "type": "string", - "example": "my-storage" + "example": "" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3SeaweedFSStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "sseCustomerKey": { + "description": "To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.", + "type": "string", + "example": "" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3SeaweedFSConfig" - } - ] + "sseCustomerKeyBase64": { + "description": "If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.", + "type": "string", + "example": "" + }, + "sseCustomerKeyMd5": { + 
"description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", + "type": "string", + "example": "" }, - "name": { - "description": "Name of the storage, must be unique", + "sseKmsKeyId": { + "description": "If using KMS ID you must provide the ARN of Key.", "type": "string", - "example": "my-storage" + "example": "" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3StackPathStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", + "type": "integer", + "default": 4 }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3StackPathConfig" - } - ] + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" }, - "name": { - "description": "Name of the storage, must be unique", + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3StorjStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3StorjConfig" - } - ] + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": 
"boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3SynologyStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3SynologyConfig" - } - ] + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", + "type": "string", + "default": "unset" }, - "name": { - "description": "Name of the storage, must be unique", + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createS3TencentCOSStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", + "type": "boolean", + "default": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3TencentCOSConfig" - } - ] + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" }, - "name": { - 
"description": "Name of the storage, must be unique", + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" + "v2Auth": { + "description": "If true use v2 authentication.", + "type": "boolean", + "default": false + }, + "versionAt": { + "description": "Show file versions as they were at the specified time.", + "type": "string", + "default": "off" + }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false } } }, - "storage.createS3WasabiStorageRequest": { + "storage.s3ChinaMobileConfig": { "type": "object", "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.s3WasabiConfig" - } - ] + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", "type": "string", - "example": "my-storage" + "example": "private" }, - "path": { - "description": "Path of the storage", + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" + }, + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" + }, + "decompress": { + "description": "If set this will decompress gzip encoded objects.", + "type": "boolean", + "default": 
false + }, + "description": { + "description": "Description of the remote.", "type": "string" - } - } - }, - "storage.createSeafileStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.seafileConfig" - } - ] + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false + }, + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", + "type": "boolean", + "default": false }, - "path": { - "description": "Path of the storage", + "downloadUrl": { + "description": "Custom endpoint for downloads.", "type": "string" - } - } - }, - "storage.createSftpStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.sftpConfig" - } - ] + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" }, - "name": { - "description": "Name of the storage, must be unique", + "endpoint": { + "description": "Endpoint for S3 API.", "type": "string", - "example": "my-storage" + "example": "eos-wuxi-1.cmecloud.cn" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createSharefileStorageRequest": { - "type": "object", - 
"properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.sharefileConfig" - } - ] + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createSiaStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", + "type": "string", + "default": "unset" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.siaConfig" - } - ] + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 }, - "name": { - "description": "Name of the storage, must be unique", + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", "type": "string", - "example": "my-storage" + "example": "wuxi1" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createSmbStorageRequest": { - "type": "object", - "properties": { - 
"clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.smbConfig" - } - ] + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", + "type": "string", + "default": "1m0s" }, - "name": { - "description": "Name of the storage, must be unique", + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", + "type": "boolean", + "default": false + }, + "mightGzip": { + "description": "Set this if the backend might gzip objects.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createStorjExistingStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.storjExistingConfig" - } - ] + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", + "type": "boolean", + "default": false }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false }, - "path": { - "description": "Path of the storage", + "noSystemMetadata": { 
+ "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false + }, + "profile": { + "description": "Profile to use in the shared credentials file.", "type": "string" - } - } - }, - "storage.createStorjNewStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.storjNewConfig" - } - ] + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" }, - "path": { - "description": "Path of the storage", + "roleSessionDuration": { + "description": "Session duration for assumed role.", "type": "string" - } - } - }, - "storage.createSugarsyncStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.sugarsyncConfig" - } - ] + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "example": "my-storage" + "default": "Off" }, - "path": { - "description": "Path of the storage", + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" - } - } - }, - "storage.createSwiftStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP 
client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.swiftConfig" - } - ] + "serverSideEncryption": { + "description": "The server-side encryption algorithm used when storing this object in S3.", + "type": "string" }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" + "sessionToken": { + "description": "An AWS session token.", + "type": "string" }, - "path": { - "description": "Path of the storage", + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", "type": "string" - } - } - }, - "storage.createUnionStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.unionConfig" - } - ] + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" }, - "name": { - "description": "Name of the storage, must be unique", + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", "type": "string", - "example": "my-storage" + "example": "" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createUptoboxStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "sseCustomerKey": { + "description": "To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.", + "type": "string", + "example": "" }, - "config": { - 
"description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.uptoboxConfig" - } - ] + "sseCustomerKeyBase64": { + "description": "If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.", + "type": "string", + "example": "" }, - "name": { - "description": "Name of the storage, must be unique", + "sseCustomerKeyMd5": { + "description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", "type": "string", - "example": "my-storage" + "example": "" }, - "path": { - "description": "Path of the storage", + "storageClass": { + "description": "The storage class to use when storing new objects in S3.", "type": "string" - } - } - }, - "storage.createWebdavStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.webdavConfig" - } - ] + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", + "type": "integer", + "default": 4 }, - "name": { - "description": "Name of the storage, must be unique", + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", "type": "string", - "example": "my-storage" + "default": "200Mi" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createYandexStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": 
"#/definitions/storage.yandexConfig" - } - ] + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" }, - "name": { - "description": "Name of the storage, must be unique", + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createZohoStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.zohoConfig" - } - ] + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", + "type": "string", + "default": "unset" }, - "name": { - "description": "Name of the storage, must be unique", + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", "type": "string", - "example": "my-storage" + "default": "unset" }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.driveConfig": { - "type": "object", - "properties": { - "acknowledgeAbuse": { - "description": "Set to allow files which return cannotDownloadAbusiveFile to be downloaded.", + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, - "allowImportNameChange": { - 
"description": "Allow the filetype to change when uploading Google docs.", + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, + "v2Auth": { + "description": "If true use v2 authentication.", "type": "boolean", "default": false }, - "alternateExport": { - "description": "Deprecated: No longer needed.", + "versionAt": { + "description": "Show file versions as they were at the specified time.", + "type": "string", + "default": "off" + }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", "type": "boolean", "default": false }, - "authOwnerOnly": { - "description": "Only consider files owned by the authenticated user.", + "versions": { + "description": "Include old versions in directory listings.", "type": "boolean", "default": false - }, - "authUrl": { - "description": "Auth server URL.", + } + } + }, + "storage.s3CloudflareConfig": { + "type": "object", + "properties": { + "accessKeyId": { + "description": "AWS Access Key ID.", "type": "string" }, "chunkSize": { - "description": "Upload chunk size.", + "description": "Chunk size to use for uploading.", "type": "string", - "default": "8Mi" - }, - "clientId": { - "description": "Google Application Client Id", - "type": "string" + "default": "5Mi" }, - "clientSecret": { - "description": "OAuth Client Secret.", - "type": "string" + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" }, - "copyShortcutContent": { - "description": "Server side copy contents of shortcuts instead of the shortcut.", + "decompress": { + "description": "If set this will decompress gzip encoded objects.", "type": "boolean", "default": false }, @@ -10003,499 +15125,513 @@ "description": "Description of the remote.", "type": "string" 
}, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false + }, "disableHttp2": { - "description": "Disable drive using http2.", + "description": "Disable usage of http2 for S3 backends.", "type": "boolean", - "default": true + "default": false + }, + "downloadUrl": { + "description": "Custom endpoint for downloads.", + "type": "string" }, "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": "InvalidUtf8" + "default": "Slash,InvalidUtf8,Dot" + }, + "endpoint": { + "description": "Endpoint for S3 API.", + "type": "string" }, "envAuth": { - "description": "Get IAM credentials from runtime (environment variables or instance meta data if no env vars).", + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", "type": "boolean", "default": false, "example": false }, - "exportFormats": { - "description": "Comma separated list of preferred formats for downloading Google docs.", - "type": "string", - "default": "docx,xlsx,pptx,svg" - }, - "fastListBugFix": { - "description": "Work around a bug in Google Drive listing.", + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", "type": "boolean", "default": true }, - "formats": { - "description": "Deprecated: See export_formats.", - "type": "string" - }, - "impersonate": { - "description": "Impersonate this user when using a service account.", - "type": "string" - }, - "importFormats": { - "description": "Comma separated list of preferred formats for uploading Google docs.", - "type": "string" - }, - "keepRevisionForever": { - "description": "Keep new head revision of each file forever.", - "type": "boolean", - "default": false - }, "listChunk": 
{ - "description": "Size of listing chunk 100-1000, 0 to disable.", + "description": "Size of listing chunk (response list for each ListObject S3 request).", "type": "integer", "default": 1000 }, - "metadataLabels": { - "description": "Control whether labels should be read or written in metadata.", - "type": "string", - "default": "off", - "example": "off" - }, - "metadataOwner": { - "description": "Control whether owner should be read or written in metadata.", - "type": "string", - "default": "read", - "example": "off" - }, - "metadataPermissions": { - "description": "Control whether permissions should be read or written in metadata.", + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", "type": "string", - "default": "off", - "example": "off" + "default": "unset" }, - "pacerBurst": { - "description": "Number of API calls to allow without sleeping.", + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", "type": "integer", - "default": 100 - }, - "pacerMinSleep": { - "description": "Minimum time to sleep between API calls.", - "type": "string", - "default": "100ms" - }, - "resourceKey": { - "description": "Resource key for accessing a link-shared file.", - "type": "string" + "default": 0 }, - "rootFolderId": { - "description": "ID of the root folder.", - "type": "string" + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "scope": { - "description": "Comma separated list of scopes that rclone should use when requesting access from drive.", + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. 
(no longer used)", "type": "string", - "example": "drive" + "default": "1m0s" }, - "serverSideAcrossConfigs": { - "description": "Deprecated: use --server-side-across-configs instead.", + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, - "serviceAccountCredentials": { - "description": "Service Account Credentials JSON blob.", - "type": "string" - }, - "serviceAccountFile": { - "description": "Service Account Credentials JSON file path.", - "type": "string" + "mightGzip": { + "description": "Set this if the backend might gzip objects.", + "type": "string", + "default": "unset" }, - "sharedWithMe": { - "description": "Only show files that are shared with me.", + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", "type": "boolean", "default": false }, - "showAllGdocs": { - "description": "Show all Google Docs including non-exportable ones in listings.", + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", "type": "boolean", "default": false }, - "sizeAsQuota": { - "description": "Show sizes as storage quota usage, not actual size.", + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", "type": "boolean", "default": false }, - "skipChecksumGphotos": { - "description": "Skip checksums on Google photos and videos only.", + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", "type": "boolean", "default": false }, - "skipDanglingShortcuts": { - "description": "If set skip dangling shortcut files.", - "type": "boolean", - "default": false + "profile": { + "description": "Profile to use in the shared credentials file.", + "type": "string" }, - "skipGdocs": { - "description": "Skip google documents in all listings.", - "type": "boolean", - "default": false + "region": { + "description": "Region to connect 
to.", + "type": "string", + "example": "auto" }, - "skipShortcuts": { - "description": "If set skip shortcut files.", - "type": "boolean", - "default": false + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" }, - "starredOnly": { - "description": "Only show files that are starred.", - "type": "boolean", - "default": false + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" }, - "stopOnDownloadLimit": { - "description": "Make download limit errors be fatal.", - "type": "boolean", - "default": false + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" }, - "stopOnUploadLimit": { - "description": "Make upload limit errors be fatal.", - "type": "boolean", - "default": false + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, - "teamDrive": { - "description": "ID of the Shared Drive (Team Drive).", + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" }, - "token": { - "description": "OAuth Access Token as a JSON blob.", + "sessionToken": { + "description": "An AWS session token.", "type": "string" }, - "tokenUrl": { - "description": "Token server url.", + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", "type": "string" }, - "trashedOnly": { - "description": "Only show files that are in the trash.", + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", + "type": "integer", + "default": 4 + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" + }, + 
"useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", + "type": "string", + "default": "unset" + }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, - "uploadCutoff": { - "description": "Cutoff for switching to chunked upload.", + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", "type": "string", - "default": "8Mi" + "default": "unset" }, - "useCreatedDate": { - "description": "Use file created date instead of modified date.", + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, + "v2Auth": { + "description": "If true use v2 authentication.", "type": "boolean", "default": false }, - "useSharedDate": { - "description": "Use date file was shared instead of modified date.", + "versionAt": { + "description": "Show file versions as they were at the specified time.", + "type": "string", + "default": "off" + }, + "versionDeleted": { + "description": 
"Show deleted file markers when using versions.", "type": "boolean", "default": false }, - "useTrash": { - "description": "Send files to the trash instead of deleting permanently.", + "versions": { + "description": "Include old versions in directory listings.", "type": "boolean", - "default": true - }, - "v2DownloadMinSize": { - "description": "If Object's are greater, use drive v2 API to download.", - "type": "string", - "default": "off" + "default": false } } }, - "storage.dropboxConfig": { + "storage.s3CubbitConfig": { "type": "object", "properties": { - "authUrl": { - "description": "Auth server URL.", + "accessKeyId": { + "description": "AWS Access Key ID.", "type": "string" }, - "batchCommitTimeout": { - "description": "Max time to wait for a batch to finish committing", - "type": "string", - "default": "10m0s" - }, - "batchMode": { - "description": "Upload file batching sync|async|off.", - "type": "string", - "default": "sync" - }, - "batchSize": { - "description": "Max number of files in upload batch.", - "type": "integer", - "default": 0 + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", + "type": "string" }, - "batchTimeout": { - "description": "Max time to allow an idle upload batch before uploading.", + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", "type": "string", - "default": "0s" + "example": "private" }, "chunkSize": { - "description": "Upload chunk size (\u003c 150Mi).", + "description": "Chunk size to use for uploading.", "type": "string", - "default": "48Mi" + "default": "5Mi" }, - "clientId": { - "description": "OAuth Client Id.", - "type": "string" + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" }, - "clientSecret": { - "description": "OAuth Client Secret.", - "type": "string" + "decompress": { + "description": "If set this will decompress gzip encoded objects.", + "type": "boolean", + 
"default": false }, "description": { "description": "Description of the remote.", "type": "string" }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false + }, + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", + "type": "boolean", + "default": false + }, + "downloadUrl": { + "description": "Custom endpoint for downloads.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": "Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot" + "default": "Slash,InvalidUtf8,Dot" }, - "impersonate": { - "description": "Impersonate this user when using a business account.", - "type": "string" + "endpoint": { + "description": "Endpoint for S3 API.", + "type": "string", + "example": "s3.cubbit.eu" }, - "pacerMinSleep": { - "description": "Minimum time to sleep between API calls.", + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false + }, + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true + }, + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 + }, + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", "type": "string", - "default": "10ms" + "default": "unset" }, - "rootNamespace": { - "description": "Specify a different Dropbox namespace ID to use as the root for all paths.", - "type": "string" + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + 
"default": 0 }, - "sharedFiles": { - "description": "Instructs rclone to work on individual shared files.", + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 + }, + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", + "type": "string", + "default": "1m0s" + }, + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, - "sharedFolders": { - "description": "Instructs rclone to work on shared folders.", + "mightGzip": { + "description": "Set this if the backend might gzip objects.", + "type": "string", + "default": "unset" + }, + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", "type": "boolean", "default": false }, - "token": { - "description": "OAuth Access Token as a JSON blob.", - "type": "string" + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", + "type": "boolean", + "default": false }, - "tokenUrl": { - "description": "Token server url.", - "type": "string" - } - } - }, - "storage.fichierConfig": { - "type": "object", - "properties": { - "apiKey": { - "description": "Your API Key, get it from https://1fichier.com/console/params.pl.", - "type": "string" + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false }, - "cdn": { - "description": "Set if you wish to use CDN download links.", + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", "type": "boolean", "default": false }, - "description": { - "description": "Description of the remote.", + "profile": { + "description": "Profile to use in the shared credentials file.", "type": "string" }, - "encoding": { - "description": "The encoding for the backend.", + "region": { 
+ "description": "Region to connect to.", "type": "string", - "default": "Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot" + "example": "eu-west-1" }, - "filePassword": { - "description": "If you want to download a shared file that is password protected, add this parameter.", + "roleArn": { + "description": "ARN of the IAM role to assume.", "type": "string" }, - "folderPassword": { - "description": "If you want to list the files in a shared folder that is password protected, add this parameter.", + "roleExternalId": { + "description": "External ID for assumed role.", "type": "string" }, - "sharedFolder": { - "description": "If you want to download a shared folder, add this parameter.", - "type": "string" - } - } - }, - "storage.filefabricConfig": { - "type": "object", - "properties": { - "description": { - "description": "Description of the remote.", + "roleSessionDuration": { + "description": "Session duration for assumed role.", "type": "string" }, - "encoding": { - "description": "The encoding for the backend.", - "type": "string", - "default": "Slash,Del,Ctl,InvalidUtf8,Dot" - }, - "permanentToken": { - "description": "Permanent Authentication Token.", + "roleSessionName": { + "description": "Session name for assumed role.", "type": "string" }, - "rootFolderId": { - "description": "ID of the root folder.", - "type": "string" + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" }, - "token": { - "description": "Session Token.", + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" }, - "tokenExpiry": { - "description": "Token expiry time.", + "sessionToken": { + "description": "An AWS session token.", "type": "string" }, - "url": { - "description": "URL of the Enterprise File Fabric to connect to.", - "type": "string", - "example": "https://storagemadeeasy.com" - }, - "version": { - "description": "Version read from the 
file fabric.", + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", "type": "string" - } - } - }, - "storage.ftpConfig": { - "type": "object", - "properties": { - "askPassword": { - "description": "Allow asking for FTP password when needed.", - "type": "boolean", - "default": false }, - "closeTimeout": { - "description": "Maximum time to wait for a response to close.", + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "default": "1m0s" + "default": "unset" }, - "concurrency": { - "description": "Maximum number of FTP simultaneous connections, 0 for unlimited.", + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", "type": "integer", - "default": 0 + "default": 4 }, - "description": { - "description": "Description of the remote.", - "type": "string" + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" }, - "disableEpsv": { - "description": "Disable using EPSV even if server advertises support.", - "type": "boolean", - "default": false + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" }, - "disableMlsd": { - "description": "Disable using MLSD even if server advertises support.", - "type": "boolean", - "default": false + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" }, - "disableTls13": { - "description": "Disable TLS 1.3 (workaround for FTP servers with buggy TLS)", + "useArnRegion": { + "description": "If true, enables arn region support for the service.", "type": "boolean", "default": false }, - "disableUtf8": { - "description": "Disable using UTF-8 even if server advertises support.", + "useDataIntegrityProtections": { + "description": "If true 
use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", "default": false }, - "encoding": { - "description": "The encoding for the backend.", + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", "type": "string", - "default": "Slash,Del,Ctl,RightSpace,Dot", - "example": "Asterisk,Ctl,Dot,Slash" + "default": "unset" }, - "explicitTls": { - "description": "Use Explicit FTPS (FTP over TLS).", - "type": "boolean", - "default": false + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" }, - "forceListHidden": { - "description": "Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD.", + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, - "host": { - "description": "FTP host to connect to.", - "type": "string" + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" }, - "idleTimeout": { - "description": "Max time before closing idle connections.", + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", "type": "string", - "default": "1m0s" + "default": "unset" }, - "noCheckCertificate": { - "description": "Do not verify the TLS certificate of the server.", + "v2Auth": { + "description": "If true use v2 authentication.", "type": "boolean", "default": false }, - "pass": { - "description": "FTP password.", - "type": "string" - }, - "port": { - "description": "FTP port number.", - "type": "integer", - "default": 21 - }, - "shutTimeout": { - "description": "Maximum time to wait for data connection closing status.", + "versionAt": { + "description": "Show file 
versions as they were at the specified time.", "type": "string", - "default": "1m0s" - }, - "socksProxy": { - "description": "Socks 5 proxy host.", - "type": "string" + "default": "off" }, - "tls": { - "description": "Use Implicit FTPS (FTP over TLS).", + "versionDeleted": { + "description": "Show deleted file markers when using versions.", "type": "boolean", "default": false }, - "tlsCacheSize": { - "description": "Size of TLS session cache for all control and data connections.", - "type": "integer", - "default": 32 - }, - "user": { - "description": "FTP username.", - "type": "string", - "default": "$USER" - }, - "writingMdtm": { - "description": "Use MDTM to set modification time (VsFtpd quirk)", + "versions": { + "description": "Include old versions in directory listings.", "type": "boolean", "default": false } } }, - "storage.gcsConfig": { + "storage.s3DigitalOceanConfig": { "type": "object", "properties": { - "anonymous": { - "description": "Access public buckets and objects without credentials.", - "type": "boolean", - "default": false + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" }, - "authUrl": { - "description": "Auth server URL.", + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", "type": "string" }, "bucketAcl": { - "description": "Access Control List for new buckets.", + "description": "Canned ACL used when creating buckets.", "type": "string", - "example": "authenticatedRead" - }, - "bucketPolicyOnly": { - "description": "Access checks should use bucket-level IAM policies.", - "type": "boolean", - "default": false + "example": "private" }, - "clientId": { - "description": "OAuth Client Id.", - "type": "string" + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" }, - "clientSecret": { - "description": "OAuth Client Secret.", - "type": "string" + "copyCutoff": { + "description": "Cutoff for switching to multipart 
copy.", + "type": "string", + "default": "4.656Gi" }, "decompress": { "description": "If set this will decompress gzip encoded objects.", @@ -10511,748 +15647,779 @@ "type": "boolean", "default": false }, + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false + }, + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", + "type": "boolean", + "default": false + }, + "downloadUrl": { + "description": "Custom endpoint for downloads.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": "Slash,CrLf,InvalidUtf8,Dot" + "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for the service.", - "type": "string" + "description": "Endpoint for S3 API.", + "type": "string", + "example": "syd1.digitaloceanspaces.com" }, "envAuth": { - "description": "Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars).", + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", "type": "boolean", "default": false, "example": false }, - "location": { - "description": "Location for the newly created buckets.", + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true + }, + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 + }, + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", "type": "string", - "example": "" + "default": "unset" + }, + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 + }, + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", + "type": "string" + }, + 
"maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 + }, + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", + "type": "string", + "default": "1m0s" + }, + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", + "type": "boolean", + "default": false + }, + "mightGzip": { + "description": "Set this if the backend might gzip objects.", + "type": "string", + "default": "unset" }, "noCheckBucket": { "description": "If set, don't attempt to check the bucket exists or create it.", "type": "boolean", "default": false }, - "objectAcl": { - "description": "Access Control List for new objects.", - "type": "string", - "example": "authenticatedRead" + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", + "type": "boolean", + "default": false }, - "projectNumber": { - "description": "Project number.", + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false + }, + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false + }, + "profile": { + "description": "Profile to use in the shared credentials file.", "type": "string" }, - "serviceAccountCredentials": { - "description": "Service Account Credentials JSON blob.", + "region": { + "description": "Region to connect to.", "type": "string" }, - "serviceAccountFile": { - "description": "Service Account Credentials JSON file path.", + "roleArn": { + "description": "ARN of the IAM role to assume.", "type": "string" }, - "storageClass": { - "description": "The storage class to use when storing objects in Google Cloud Storage.", - "type": "string", - "example": "" + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" 
}, - "token": { - "description": "OAuth Access Token as a JSON blob.", + "roleSessionDuration": { + "description": "Session duration for assumed role.", "type": "string" }, - "tokenUrl": { - "description": "Token server url.", + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" }, - "userProject": { - "description": "User project.", - "type": "string" - } - } - }, - "storage.gphotosConfig": { - "type": "object", - "properties": { - "authUrl": { - "description": "Auth server URL.", + "sessionToken": { + "description": "An AWS session token.", "type": "string" }, - "batchCommitTimeout": { - "description": "Max time to wait for a batch to finish committing", - "type": "string", - "default": "10m0s" + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", + "type": "string" }, - "batchMode": { - "description": "Upload file batching sync|async|off.", + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "default": "sync" + "default": "unset" }, - "batchSize": { - "description": "Max number of files in upload batch.", + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", "type": "integer", - "default": 0 + "default": 4 }, - "batchTimeout": { - "description": "Max time to allow an idle upload batch before uploading.", + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", "type": "string", - "default": "0s" + "default": "200Mi" }, - "clientId": { - "description": "OAuth Client Id.", - "type": "string" + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" }, - "clientSecret": { - 
"description": "OAuth Client Secret.", - "type": "string" + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" }, - "description": { - "description": "Description of the remote.", - "type": "string" + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false }, - "encoding": { - "description": "The encoding for the backend.", + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", "type": "string", - "default": "Slash,CrLf,InvalidUtf8,Dot" + "default": "unset" }, - "includeArchived": { - "description": "Also view and download archived media.", + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", "default": false }, - "readOnly": { - "description": "Set to make the Google Photos backend read only.", + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", + "type": "string", + "default": "unset" + }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, - "readSize": { - "description": "Set to read the size of media items.", + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, + "v2Auth": { + "description": "If true use v2 authentication.", "type": "boolean", "default": false }, - "startYear": { - "description": "Year limits the photos to be downloaded to those which are uploaded after the given 
year.", - "type": "integer", - "default": 2000 + "versionAt": { + "description": "Show file versions as they were at the specified time.", + "type": "string", + "default": "off" }, - "token": { - "description": "OAuth Access Token as a JSON blob.", - "type": "string" + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false }, - "tokenUrl": { - "description": "Token server url.", - "type": "string" + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false } } }, - "storage.hdfsConfig": { + "storage.s3DreamhostConfig": { "type": "object", "properties": { - "dataTransferProtection": { - "description": "Kerberos data transfer protection: authentication|integrity|privacy.", - "type": "string", - "example": "privacy" - }, - "description": { - "description": "Description of the remote.", - "type": "string" - }, - "encoding": { - "description": "The encoding for the backend.", - "type": "string", - "default": "Slash,Colon,Del,Ctl,InvalidUtf8,Dot" - }, - "namenode": { - "description": "Hadoop name nodes and ports.", + "accessKeyId": { + "description": "AWS Access Key ID.", "type": "string" }, - "servicePrincipalName": { - "description": "Kerberos service principal name for the namenode.", + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", "type": "string" }, - "username": { - "description": "Hadoop user name.", + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", "type": "string", - "example": "root" - } - } - }, - "storage.hidriveConfig": { - "type": "object", - "properties": { - "authUrl": { - "description": "Auth server URL.", - "type": "string" + "example": "private" }, "chunkSize": { - "description": "Chunksize for chunked uploads.", + "description": "Chunk size to use for uploading.", "type": "string", - "default": "48Mi" + "default": "5Mi" }, - "clientId": { - 
"description": "OAuth Client Id.", - "type": "string" + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" }, - "clientSecret": { - "description": "OAuth Client Secret.", - "type": "string" + "decompress": { + "description": "If set this will decompress gzip encoded objects.", + "type": "boolean", + "default": false }, "description": { "description": "Description of the remote.", "type": "string" }, - "disableFetchingMemberCount": { - "description": "Do not fetch number of objects in directories unless it is absolutely necessary.", + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", "default": false }, + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", + "type": "boolean", + "default": false + }, + "downloadUrl": { + "description": "Custom endpoint for downloads.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": "Slash,Dot" + "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for the service.", + "description": "Endpoint for S3 API.", "type": "string", - "default": "https://api.hidrive.strato.com/2.1" + "example": "objects-us-east-1.dream.io" }, - "rootPrefix": { - "description": "The root/parent folder for all paths.", - "type": "string", - "default": "/", - "example": "/" + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false }, - "scopeAccess": { - "description": "Access permissions that rclone should use when requesting access from HiDrive.", - "type": "string", - "default": "rw", - "example": "rw" + "forcePathStyle": { 
+ "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true }, - "scopeRole": { - "description": "User-level that rclone should use when requesting access from HiDrive.", + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 + }, + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", "type": "string", - "default": "user", - "example": "user" + "default": "unset" }, - "token": { - "description": "OAuth Access Token as a JSON blob.", - "type": "string" + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 }, - "tokenUrl": { - "description": "Token server url.", + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", "type": "string" }, - "uploadConcurrency": { - "description": "Concurrency for chunked uploads.", + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", "type": "integer", - "default": 4 + "default": 10000 }, - "uploadCutoff": { - "description": "Cutoff/Threshold for chunked uploads.", + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", - "default": "96Mi" - } - } - }, - "storage.httpConfig": { - "type": "object", - "properties": { - "description": { - "description": "Description of the remote.", - "type": "string" + "default": "1m0s" + }, + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", + "type": "boolean", + "default": false }, - "headers": { - "description": "Set HTTP headers for all transactions.", - "type": "string" + "mightGzip": { + "description": "Set this if the backend might gzip objects.", + "type": "string", + "default": "unset" }, - "noEscape": { - "description": "Do not escape URL metacharacters in path names.", + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", "type": "boolean", "default": false }, "noHead": { - "description": "Don't use HEAD requests.", + "description": "If set, don't HEAD uploaded objects to check integrity.", "type": "boolean", "default": false }, - "noSlash": { - "description": "Set this if the site doesn't end directories with /.", + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", "type": "boolean", "default": false }, - "url": { - "description": "URL of HTTP host to connect to.", + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false + }, + "profile": { + "description": "Profile to use in the shared credentials file.", "type": "string" - } - } - }, - "storage.internetarchiveConfig": { - "type": "object", - "properties": { - "accessKeyId": { - "description": "IAS3 Access Key.", + }, + "region": { + "description": "Region to connect to.", "type": "string" }, - "description": { - "description": "Description of the remote.", + "roleArn": { + "description": "ARN of the IAM role to assume.", "type": "string" }, - "disableChecksum": { - "description": "Don't ask the server to test against MD5 checksum calculated by rclone.", - "type": "boolean", - "default": true + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" }, - "encoding": { - "description": "The encoding for the backend.", - "type": "string", - "default": "Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot" + "roleSessionDuration": { + 
"description": "Session duration for assumed role.", + "type": "string" }, - "endpoint": { - "description": "IAS3 Endpoint.", - "type": "string", - "default": "https://s3.us.archive.org" + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, - "frontEndpoint": { - "description": "Host of InternetArchive Frontend.", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "default": "https://archive.org" + "default": "Off" }, "secretAccessKey": { - "description": "IAS3 Secret Key (password).", + "description": "AWS Secret Access Key (password).", "type": "string" }, - "waitArchive": { - "description": "Timeout for waiting the server's processing tasks (specifically archive and book_op) to finish.", - "type": "string", - "default": "0s" - } - } - }, - "storage.jottacloudConfig": { - "type": "object", - "properties": { - "authUrl": { - "description": "Auth server URL.", + "sessionToken": { + "description": "An AWS session token.", "type": "string" }, - "clientId": { - "description": "OAuth Client Id.", + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", "type": "string" }, - "clientSecret": { - "description": "OAuth Client Secret.", - "type": "string" + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" }, - "description": { - "description": "Description of the remote.", - "type": "string" + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", + "type": "integer", + "default": 4 }, - "encoding": { - "description": "The encoding for the backend.", + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", "type": "string", - "default": "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot" + "default": "200Mi" }, - "hardDelete": { - "description": "Delete files permanently rather than putting 
them into the trash.", + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", "type": "boolean", "default": false }, - "md5MemoryLimit": { - "description": "Files bigger than this will be cached on disk to calculate the MD5 if required.", + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", "type": "string", - "default": "10Mi" + "default": "unset" }, - "noVersions": { - "description": "Avoid server side versioning by deleting files and recreating files instead of overwriting them.", + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", "default": false }, - "token": { - "description": "OAuth Access Token as a JSON blob.", - "type": "string" + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", + "type": "string", + "default": "unset" }, - "tokenUrl": { - "description": "Token server url.", - "type": "string" + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" }, - "trashedOnly": { - "description": "Only show files that are in the trash.", + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, - "uploadResumeLimit": { - "description": "Files bigger than this can be resumed if the upload fail's.", + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", "type": "string", - "default": "10Mi" - } - } - }, - "storage.koofrDigistorageConfig": { - "type": 
"object", - "properties": { - "description": { - "description": "Description of the remote.", - "type": "string" + "default": "unset" }, - "encoding": { - "description": "The encoding for the backend.", + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", "type": "string", - "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" + "default": "unset" }, - "mountid": { - "description": "Mount ID of the mount to use.", - "type": "string" + "v2Auth": { + "description": "If true use v2 authentication.", + "type": "boolean", + "default": false }, - "password": { - "description": "Your password for rclone generate one at https://storage.rcs-rds.ro/app/admin/preferences/password.", - "type": "string" + "versionAt": { + "description": "Show file versions as they were at the specified time.", + "type": "string", + "default": "off" }, - "setmtime": { - "description": "Does the backend support setting modification time.", + "versionDeleted": { + "description": "Show deleted file markers when using versions.", "type": "boolean", - "default": true + "default": false }, - "user": { - "description": "Your user name.", - "type": "string" + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false } } }, - "storage.koofrKoofrConfig": { + "storage.s3ExabaConfig": { "type": "object", "properties": { - "description": { - "description": "Description of the remote.", + "accessKeyId": { + "description": "AWS Access Key ID.", "type": "string" }, - "encoding": { - "description": "The encoding for the backend.", + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", + "type": "string" + }, + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", "type": "string", - "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" + "example": "private" }, - "mountid": { - "description": "Mount ID of the mount to use.", - "type": "string" + "chunkSize": { + 
"description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" }, - "password": { - "description": "Your password for rclone generate one at https://app.koofr.net/app/admin/preferences/password.", - "type": "string" + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" }, - "setmtime": { - "description": "Does the backend support setting modification time.", + "decompress": { + "description": "If set this will decompress gzip encoded objects.", "type": "boolean", - "default": true + "default": false }, - "user": { - "description": "Your user name.", - "type": "string" - } - } - }, - "storage.koofrOtherConfig": { - "type": "object", - "properties": { "description": { "description": "Description of the remote.", "type": "string" }, - "encoding": { - "description": "The encoding for the backend.", - "type": "string", - "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" - }, - "endpoint": { - "description": "The Koofr API endpoint to use.", - "type": "string" - }, - "mountid": { - "description": "Mount ID of the mount to use.", - "type": "string" - }, - "password": { - "description": "Your password for rclone (generate one at your service's settings page).", - "type": "string" - }, - "setmtime": { - "description": "Does the backend support setting modification time.", - "type": "boolean", - "default": true - }, - "user": { - "description": "Your user name.", - "type": "string" - } - } - }, - "storage.localConfig": { - "type": "object", - "properties": { - "caseInsensitive": { - "description": "Force the filesystem to report itself as case insensitive.", + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", "type": "boolean", "default": false }, - "caseSensitive": { - "description": "Force the filesystem to report itself as case sensitive.", + "disableChecksum": { + "description": "Don't store MD5 checksum 
with object metadata.", "type": "boolean", "default": false }, - "copyLinks": { - "description": "Follow symlinks and copy the pointed to item.", + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", "type": "boolean", "default": false }, - "description": { - "description": "Description of the remote.", + "downloadUrl": { + "description": "Custom endpoint for downloads.", "type": "string" }, "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": "Slash,Dot" + "default": "Slash,InvalidUtf8,Dot" }, - "links": { - "description": "Translate symlinks to/from regular files with a '.rclonelink' extension.", - "type": "boolean", - "default": false + "endpoint": { + "description": "Endpoint for S3 API.", + "type": "string" }, - "noCheckUpdated": { - "description": "Don't check to see if the files change during upload.", + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", "type": "boolean", - "default": false + "default": false, + "example": false }, - "noClone": { - "description": "Disable reflink cloning for server-side copies.", + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", "type": "boolean", - "default": false + "default": true }, - "noPreallocate": { - "description": "Disable preallocation of disk space for transferred files.", - "type": "boolean", - "default": false + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 }, - "noSetModtime": { - "description": "Disable setting modtime.", - "type": "boolean", - "default": false + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", + "type": "string", + "default": "unset" }, - "noSparse": { - "description": "Disable sparse files for multi-thread downloads.", + "listVersion": { + "description": 
"Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 + }, + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", + "type": "string" + }, + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 + }, + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", + "type": "string", + "default": "1m0s" + }, + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, - "nounc": { - "description": "Disable UNC (long path names) conversion on Windows.", - "type": "boolean", - "default": false, - "example": true + "mightGzip": { + "description": "Set this if the backend might gzip objects.", + "type": "string", + "default": "unset" }, - "oneFileSystem": { - "description": "Don't cross filesystem boundaries (unix/macOS only).", + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", "type": "boolean", "default": false }, - "skipLinks": { - "description": "Don't warn about skipped symlinks.", + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", "type": "boolean", "default": false }, - "timeType": { - "description": "Set what kind of time is returned.", - "type": "string", - "default": "mtime", - "example": "mtime" - }, - "unicodeNormalization": { - "description": "Apply unicode NFC normalization to paths and filenames.", + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", "type": "boolean", "default": false }, - "zeroSizeLinks": { - "description": "Assume the Stat size of links is zero (and read them instead) (deprecated).", + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", "type": "boolean", "default": 
false - } - } - }, - "storage.mailruConfig": { - "type": "object", - "properties": { - "authUrl": { - "description": "Auth server URL.", + }, + "profile": { + "description": "Profile to use in the shared credentials file.", "type": "string" }, - "checkHash": { - "description": "What should copy do if file checksum is mismatched or invalid.", - "type": "boolean", - "default": true, - "example": true + "region": { + "description": "Region to connect to.", + "type": "string" }, - "clientId": { - "description": "OAuth Client Id.", + "roleArn": { + "description": "ARN of the IAM role to assume.", "type": "string" }, - "clientSecret": { - "description": "OAuth Client Secret.", + "roleExternalId": { + "description": "External ID for assumed role.", "type": "string" }, - "description": { - "description": "Description of the remote.", + "roleSessionDuration": { + "description": "Session duration for assumed role.", "type": "string" }, - "encoding": { - "description": "The encoding for the backend.", + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "default": "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot" + "default": "Off" }, - "pass": { - "description": "Password.", + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" }, - "quirks": { - "description": "Comma separated list of internal maintenance flags.", + "sessionToken": { + "description": "An AWS session token.", "type": "string" }, - "speedupEnable": { - "description": "Skip full upload if there is another file with same data hash.", - "type": "boolean", - "default": true, - "example": true + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", + "type": "string" }, - "speedupFilePatterns": { - "description": "Comma separated list of file name patterns eligible for speedup (put by 
hash).", + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "default": "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf", - "example": "" + "default": "unset" }, - "speedupMaxDisk": { - "description": "This option allows you to disable speedup (put by hash) for large files.", - "type": "string", - "default": "3Gi", - "example": "0" + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", + "type": "integer", + "default": 4 }, - "speedupMaxMemory": { - "description": "Files larger than the size given below will always be hashed on disk.", + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", "type": "string", - "default": "32Mi", - "example": "0" - }, - "token": { - "description": "OAuth Access Token as a JSON blob.", - "type": "string" + "default": "200Mi" }, - "tokenUrl": { - "description": "Token server url.", - "type": "string" + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" }, - "user": { - "description": "User name (usually email).", - "type": "string" + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" }, - "userAgent": { - "description": "HTTP user agent used internally by client.", - "type": "string" - } - } - }, - "storage.megaConfig": { - "type": "object", - "properties": { - "debug": { - "description": "Output more debug from Mega.", + "useArnRegion": { + "description": "If true, enables arn region support for the service.", "type": "boolean", "default": false }, - "description": { - "description": "Description of the remote.", - "type": "string" - }, - "encoding": { - "description": "The encoding for the backend.", + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", 
"type": "string", - "default": "Slash,InvalidUtf8,Dot" + "default": "unset" }, - "hardDelete": { - "description": "Delete files permanently rather than putting them into the trash.", + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", "default": false }, - "pass": { - "description": "Password.", - "type": "string" + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", + "type": "string", + "default": "unset" }, - "useHttps": { - "description": "Use HTTPS for transfers.", + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, - "user": { - "description": "User name.", - "type": "string" - } - } - }, - "storage.netstorageConfig": { - "type": "object", - "properties": { - "account": { - "description": "Set the NetStorage account name", - "type": "string" + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" }, - "description": { - "description": "Description of the remote.", - "type": "string" + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" }, - "host": { - "description": "Domain+path of NetStorage host to connect to.", - "type": "string" + "v2Auth": { + "description": "If true use v2 authentication.", + "type": "boolean", + "default": false }, - "protocol": { - "description": "Select between HTTP or HTTPS protocol.", + "versionAt": { + "description": "Show file versions as they were at the specified time.", "type": "string", - "default": "https", - "example": "http" + "default": "off" }, - "secret": { - "description": "Set the NetStorage account secret/G2O key for 
authentication.", - "type": "string" + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false } } }, - "storage.onedriveConfig": { + "storage.s3FileLuConfig": { "type": "object", "properties": { - "accessScopes": { - "description": "Set scopes to be requested by rclone.", - "type": "string", - "default": "Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access", - "example": "Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access" + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" }, - "authUrl": { - "description": "Auth server URL.", + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", "type": "string" }, - "avOverride": { - "description": "Allows download of files the server thinks has a virus.", - "type": "boolean", - "default": false + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", + "type": "string", + "example": "private" }, "chunkSize": { - "description": "Chunk size to upload files with - must be multiple of 320k (327,680 bytes).", + "description": "Chunk size to use for uploading.", "type": "string", - "default": "10Mi" - }, - "clientId": { - "description": "OAuth Client Id.", - "type": "string" + "default": "5Mi" }, - "clientSecret": { - "description": "OAuth Client Secret.", - "type": "string" + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" }, - "delta": { - "description": "If set rclone will use delta listing to implement recursive listings.", + "decompress": { + "description": "If set this will decompress gzip encoded objects.", "type": "boolean", "default": false }, @@ -11260,981 +16427,1000 @@ "description": 
"Description of the remote.", "type": "string" }, - "disableSitePermission": { - "description": "Disable the request for Sites.Read.All permission.", + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", "type": "boolean", "default": false }, - "driveId": { - "description": "The ID of the drive to use.", - "type": "string" + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false }, - "driveType": { - "description": "The type of the drive (personal | business | documentLibrary).", + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", + "type": "boolean", + "default": false + }, + "downloadUrl": { + "description": "Custom endpoint for downloads.", "type": "string" }, "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot" + "default": "Slash,InvalidUtf8,Dot" }, - "exposeOnenoteFiles": { - "description": "Set to make OneNote files show up in directory listings.", + "endpoint": { + "description": "Endpoint for S3 API.", + "type": "string", + "example": "s5lu.com" + }, + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", "type": "boolean", - "default": false + "default": false, + "example": false }, - "hardDelete": { - "description": "Permanently delete files on removal.", + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", "type": "boolean", - "default": false + "default": true }, - "hashType": { - "description": "Specify the hash in use for the backend.", + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 + }, + 
"listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", "type": "string", - "default": "auto", - "example": "auto" + "default": "unset" }, - "linkPassword": { - "description": "Set the password for links created by the link command.", - "type": "string" + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 }, - "linkScope": { - "description": "Set the scope of the links created by the link command.", - "type": "string", - "default": "anonymous", - "example": "anonymous" + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "linkType": { - "description": "Set the type of the links created by the link command.", + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", - "default": "view", - "example": "view" + "default": "1m0s" }, - "listChunk": { - "description": "Size of listing chunk.", - "type": "integer", - "default": 1000 + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", + "type": "boolean", + "default": false }, - "metadataPermissions": { - "description": "Control whether permissions should be read or written in metadata.", + "mightGzip": { + "description": "Set this if the backend might gzip objects.", "type": "string", - "default": "off", - "example": "off" + "default": "unset" }, - "noVersions": { - "description": "Remove all versions on modifying operations.", + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false + }, + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", + "type": "boolean", + "default": false + }, + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", "type": "boolean", "default": false }, + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false + }, + "profile": { + "description": "Profile to use in the shared credentials file.", + "type": "string" + }, "region": { - "description": "Choose national cloud region for OneDrive.", + "description": "Region to connect to.", "type": "string", - "default": "global", "example": "global" }, - "rootFolderId": { - "description": "ID of the root folder.", + "roleArn": { + "description": "ARN of the IAM role to assume.", "type": "string" }, - "serverSideAcrossConfigs": { - "description": "Deprecated: use --server-side-across-configs instead.", - "type": "boolean", - "default": false + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" }, - "token": { - "description": "OAuth Access Token as a JSON blob.", + "roleSessionDuration": { + "description": "Session duration for assumed role.", "type": "string" }, - "tokenUrl": { - "description": "Token server url.", + "roleSessionName": { + "description": "Session name for assumed role.", "type": "string" - } - } - }, - 
"storage.oosEnv_authConfig": { - "type": "object", - "properties": { - "attemptResumeUpload": { - "description": "If true attempt to resume previously started multipart upload for the object.", - "type": "boolean", - "default": false }, - "chunkSize": { - "description": "Chunk size to use for uploading.", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "default": "5Mi" + "default": "Off" }, - "compartment": { - "description": "Object storage compartment OCID", + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" }, - "copyCutoff": { - "description": "Cutoff for switching to multipart copy.", - "type": "string", - "default": "4.656Gi" + "sessionToken": { + "description": "An AWS session token.", + "type": "string" }, - "copyTimeout": { - "description": "Timeout for copy.", + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", + "type": "string" + }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "default": "1m0s" + "default": "unset" }, - "description": { - "description": "Description of the remote.", - "type": "string" + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", + "type": "integer", + "default": 4 }, - "disableChecksum": { - "description": "Don't store MD5 checksum with object metadata.", - "type": "boolean", - "default": false + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" }, - "encoding": { - "description": "The encoding for the backend.", + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", "type": "string", - "default": "Slash,InvalidUtf8,Dot" + "default": "unset" }, - "endpoint": { - "description": "Endpoint for Object storage API.", - "type": "string" + "useAlreadyExists": { + "description": "Set if rclone 
should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" }, - "leavePartsOnError": { - "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", + "useArnRegion": { + "description": "If true, enables arn region support for the service.", "type": "boolean", "default": false }, - "maxUploadParts": { - "description": "Maximum number of parts in a multipart upload.", - "type": "integer", - "default": 10000 - }, - "namespace": { - "description": "Object storage namespace", - "type": "string" + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" }, - "noCheckBucket": { - "description": "If set, don't attempt to check the bucket exists or create it.", + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", "default": false }, - "region": { - "description": "Object storage Region", - "type": "string" - }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", "type": "string", - "example": "" + "default": "unset" }, - "sseCustomerKey": { - "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", "type": "string", - "example": "" + "default": "unset" }, - "sseCustomerKeyFile": { - "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", - "type": "string", - "example": "" + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", + "type": "boolean", + "default": 
false }, - "sseCustomerKeySha256": { - "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", "type": "string", - "example": "" + "default": "unset" }, - "sseKmsKeyId": { - "description": "if using your own master key in vault, this header specifies the", + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", "type": "string", - "example": "" + "default": "unset" }, - "storageTier": { - "description": "The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "v2Auth": { + "description": "If true use v2 authentication.", + "type": "boolean", + "default": false + }, + "versionAt": { + "description": "Show file versions as they were at the specified time.", "type": "string", - "default": "Standard", - "example": "Standard" + "default": "off" }, - "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", - "type": "integer", - "default": 10 + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false }, - "uploadCutoff": { - "description": "Cutoff for switching to chunked upload.", - "type": "string", - "default": "200Mi" + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false } } }, - "storage.oosInstance_principal_authConfig": { + "storage.s3FlashBladeConfig": { "type": "object", "properties": { - "attemptResumeUpload": { - "description": "If true attempt to resume previously started multipart upload for the object.", - "type": "boolean", - "default": false + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" }, "chunkSize": { "description": "Chunk size to use for uploading.", "type": "string", "default": "5Mi" }, - 
"compartment": { - "description": "Object storage compartment OCID", - "type": "string" - }, "copyCutoff": { "description": "Cutoff for switching to multipart copy.", "type": "string", "default": "4.656Gi" }, - "copyTimeout": { - "description": "Timeout for copy.", - "type": "string", - "default": "1m0s" + "decompress": { + "description": "If set this will decompress gzip encoded objects.", + "type": "boolean", + "default": false }, "description": { "description": "Description of the remote.", "type": "string" }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", "default": false }, + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", + "type": "boolean", + "default": false + }, + "downloadUrl": { + "description": "Custom endpoint for downloads.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Object storage API.", + "description": "Endpoint for S3 API.", "type": "string" }, - "leavePartsOnError": { - "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", "type": "boolean", - "default": false + "default": false, + "example": false + }, + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true + }, + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 + }, + "listUrlEncode": { + "description": "Whether to url 
encode listings: true/false/unset", + "type": "string", + "default": "unset" + }, + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", "default": 10000 }, - "namespace": { - "description": "Object storage namespace", - "type": "string" + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", + "type": "string", + "default": "1m0s" + }, + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", + "type": "boolean", + "default": false + }, + "mightGzip": { + "description": "Set this if the backend might gzip objects.", + "type": "string", + "default": "unset" }, "noCheckBucket": { "description": "If set, don't attempt to check the bucket exists or create it.", "type": "boolean", "default": false }, - "region": { - "description": "Object storage Region", + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", + "type": "boolean", + "default": false + }, + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false + }, + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false + }, + "profile": { + "description": "Profile to use in the shared credentials file.", "type": "string" }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", - "type": "string", - "example": "" + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" }, - "sseCustomerKey": { - "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", - "type": 
"string", - "example": "" + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" }, - "sseCustomerKeyFile": { - "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", - "type": "string", - "example": "" + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" }, - "sseCustomerKeySha256": { - "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", - "type": "string", - "example": "" + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, - "sseKmsKeyId": { - "description": "if using your own master key in vault, this header specifies the", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "example": "" + "default": "Off" }, - "storageTier": { - "description": "The storage class to use when storing new objects in storage. 
https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", + "type": "string" + }, + "sessionToken": { + "description": "An AWS session token.", + "type": "string" + }, + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", + "type": "string" + }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "default": "Standard", - "example": "Standard" + "default": "unset" }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", - "default": 10 + "default": 4 }, "uploadCutoff": { "description": "Cutoff for switching to chunked upload.", "type": "string", "default": "200Mi" - } - } - }, - "storage.oosNo_authConfig": { - "type": "object", - "properties": { - "attemptResumeUpload": { - "description": "If true attempt to resume previously started multipart upload for the object.", - "type": "boolean", - "default": false - }, - "chunkSize": { - "description": "Chunk size to use for uploading.", - "type": "string", - "default": "5Mi" - }, - "copyCutoff": { - "description": "Cutoff for switching to multipart copy.", - "type": "string", - "default": "4.656Gi" }, - "copyTimeout": { - "description": "Timeout for copy.", + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", "type": "string", - "default": "1m0s" - }, - "description": { - "description": "Description of the remote.", - "type": "string" - }, - "disableChecksum": { - "description": "Don't store MD5 checksum with object metadata.", - "type": "boolean", - "default": false + "default": "unset" }, - "encoding": { - "description": "The encoding for the backend.", + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists 
errors on bucket creation.", "type": "string", - "default": "Slash,InvalidUtf8,Dot" - }, - "endpoint": { - "description": "Endpoint for Object storage API.", - "type": "string" + "default": "unset" }, - "leavePartsOnError": { - "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", + "useArnRegion": { + "description": "If true, enables arn region support for the service.", "type": "boolean", "default": false }, - "maxUploadParts": { - "description": "Maximum number of parts in a multipart upload.", - "type": "integer", - "default": 10000 - }, - "namespace": { - "description": "Object storage namespace", - "type": "string" + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" }, - "noCheckBucket": { - "description": "If set, don't attempt to check the bucket exists or create it.", + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", "default": false }, - "region": { - "description": "Object storage Region", - "type": "string" - }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", "type": "string", - "example": "" + "default": "unset" }, - "sseCustomerKey": { - "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", "type": "string", - "example": "" + "default": "unset" }, - "sseCustomerKeyFile": { - "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", - "type": "string", - "example": "" + "usePresignedRequest": { + "description": "Whether to use 
a presigned request or PutObject for single part uploads", + "type": "boolean", + "default": false }, - "sseCustomerKeySha256": { - "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", "type": "string", - "example": "" + "default": "unset" }, - "sseKmsKeyId": { - "description": "if using your own master key in vault, this header specifies the", + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", "type": "string", - "example": "" + "default": "unset" }, - "storageTier": { - "description": "The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "v2Auth": { + "description": "If true use v2 authentication.", + "type": "boolean", + "default": false + }, + "versionAt": { + "description": "Show file versions as they were at the specified time.", "type": "string", - "default": "Standard", - "example": "Standard" + "default": "off" }, - "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", - "type": "integer", - "default": 10 + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false }, - "uploadCutoff": { - "description": "Cutoff for switching to chunked upload.", - "type": "string", - "default": "200Mi" + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false } } }, - "storage.oosResource_principal_authConfig": { + "storage.s3GCSConfig": { "type": "object", "properties": { - "attemptResumeUpload": { - "description": "If true attempt to resume previously started multipart upload for the object.", - "type": "boolean", - "default": false + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" + }, + "acl": { + 
"description": "Canned ACL used when creating buckets and storing or copying objects.", + "type": "string" + }, + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", + "type": "string", + "example": "private" }, "chunkSize": { "description": "Chunk size to use for uploading.", "type": "string", "default": "5Mi" }, - "compartment": { - "description": "Object storage compartment OCID", - "type": "string" - }, "copyCutoff": { "description": "Cutoff for switching to multipart copy.", "type": "string", "default": "4.656Gi" }, - "copyTimeout": { - "description": "Timeout for copy.", - "type": "string", - "default": "1m0s" + "decompress": { + "description": "If set this will decompress gzip encoded objects.", + "type": "boolean", + "default": false }, "description": { "description": "Description of the remote.", "type": "string" }, - "disableChecksum": { - "description": "Don't store MD5 checksum with object metadata.", + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", "type": "boolean", "default": false }, - "encoding": { - "description": "The encoding for the backend.", - "type": "string", - "default": "Slash,InvalidUtf8,Dot" - }, - "endpoint": { - "description": "Endpoint for Object storage API.", - "type": "string" - }, - "leavePartsOnError": { - "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", "default": false }, - "maxUploadParts": { - "description": "Maximum number of parts in a multipart upload.", - "type": "integer", - "default": 10000 - }, - "namespace": { - "description": "Object storage namespace", - "type": "string" - }, - "noCheckBucket": { - "description": "If set, don't attempt to check the bucket exists or create it.", + "disableHttp2": { + "description": "Disable usage of http2 
for S3 backends.", "type": "boolean", "default": false }, - "region": { - "description": "Object storage Region", + "downloadUrl": { + "description": "Custom endpoint for downloads.", "type": "string" }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", - "type": "string", - "example": "" - }, - "sseCustomerKey": { - "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", - "type": "string", - "example": "" - }, - "sseCustomerKeyFile": { - "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "encoding": { + "description": "The encoding for the backend.", "type": "string", - "example": "" + "default": "Slash,InvalidUtf8,Dot" }, - "sseCustomerKeySha256": { - "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", + "endpoint": { + "description": "Endpoint for S3 API.", "type": "string", - "example": "" + "example": "https://storage.googleapis.com" }, - "sseKmsKeyId": { - "description": "if using your own master key in vault, this header specifies the", - "type": "string", - "example": "" + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false }, - "storageTier": { - "description": "The storage class to use when storing new objects in storage. 
https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", - "type": "string", - "default": "Standard", - "example": "Standard" + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true }, - "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", "type": "integer", - "default": 10 + "default": 1000 }, - "uploadCutoff": { - "description": "Cutoff for switching to chunked upload.", + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", "type": "string", - "default": "200Mi" - } - } - }, - "storage.oosUser_principal_authConfig": { - "type": "object", - "properties": { - "attemptResumeUpload": { - "description": "If true attempt to resume previously started multipart upload for the object.", - "type": "boolean", - "default": false + "default": "unset" }, - "chunkSize": { - "description": "Chunk size to use for uploading.", - "type": "string", - "default": "5Mi" + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 }, - "compartment": { - "description": "Object storage compartment OCID", + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", "type": "string" }, - "configFile": { - "description": "Path to OCI config file", - "type": "string", - "default": "~/.oci/config", - "example": "~/.oci/config" + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "configProfile": { - "description": "Profile name inside the oci config file", + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. 
(no longer used)", "type": "string", - "default": "Default", - "example": "Default" + "default": "1m0s" }, - "copyCutoff": { - "description": "Cutoff for switching to multipart copy.", - "type": "string", - "default": "4.656Gi" + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", + "type": "boolean", + "default": false }, - "copyTimeout": { - "description": "Timeout for copy.", + "mightGzip": { + "description": "Set this if the backend might gzip objects.", "type": "string", - "default": "1m0s" + "default": "unset" }, - "description": { - "description": "Description of the remote.", - "type": "string" + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false }, - "disableChecksum": { - "description": "Don't store MD5 checksum with object metadata.", + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", "type": "boolean", "default": false }, - "encoding": { - "description": "The encoding for the backend.", - "type": "string", - "default": "Slash,InvalidUtf8,Dot" - }, - "endpoint": { - "description": "Endpoint for Object storage API.", - "type": "string" + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false }, - "leavePartsOnError": { - "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", "type": "boolean", "default": false }, - "maxUploadParts": { - "description": "Maximum number of parts in a multipart upload.", - "type": "integer", - "default": 10000 + "profile": { + "description": "Profile to use in the shared credentials file.", + "type": "string" }, - "namespace": { - "description": "Object storage namespace", + "region": { + 
"description": "Region to connect to.", "type": "string" }, - "noCheckBucket": { - "description": "If set, don't attempt to check the bucket exists or create it.", - "type": "boolean", - "default": false + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" }, - "region": { - "description": "Object storage Region", + "roleExternalId": { + "description": "External ID for assumed role.", "type": "string" }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", - "type": "string", - "example": "" + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" }, - "sseCustomerKey": { - "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", - "type": "string", - "example": "" + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, - "sseCustomerKeyFile": { - "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "example": "" + "default": "Off" }, - "sseCustomerKeySha256": { - "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", - "type": "string", - "example": "" + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", + "type": "string" }, - "sseKmsKeyId": { - "description": "if using your own master key in vault, this header specifies the", - "type": "string", - "example": "" + "sessionToken": { + "description": "An AWS session token.", + "type": "string" }, - "storageTier": { - "description": "The storage class to use when storing new objects in storage. 
https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", + "type": "string" + }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "default": "Standard", - "example": "Standard" + "default": "unset" }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", - "default": 10 + "default": 4 }, "uploadCutoff": { "description": "Cutoff for switching to chunked upload.", "type": "string", "default": "200Mi" - } - } - }, - "storage.oosWorkload_identity_authConfig": { - "type": "object", - "properties": { - "attemptResumeUpload": { - "description": "If true attempt to resume previously started multipart upload for the object.", - "type": "boolean", - "default": false - }, - "chunkSize": { - "description": "Chunk size to use for uploading.", - "type": "string", - "default": "5Mi" }, - "compartment": { - "description": "Object storage compartment OCID", - "type": "string" - }, - "copyCutoff": { - "description": "Cutoff for switching to multipart copy.", + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", "type": "string", - "default": "4.656Gi" + "default": "unset" }, - "copyTimeout": { - "description": "Timeout for copy.", + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", "type": "string", - "default": "1m0s" - }, - "description": { - "description": "Description of the remote.", - "type": "string" + "default": "unset" }, - "disableChecksum": { - "description": "Don't store MD5 checksum with object metadata.", + "useArnRegion": { + "description": "If true, enables arn region support for the service.", "type": "boolean", "default": false }, - "encoding": { 
- "description": "The encoding for the backend.", + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", "type": "string", - "default": "Slash,InvalidUtf8,Dot" - }, - "endpoint": { - "description": "Endpoint for Object storage API.", - "type": "string" + "default": "unset" }, - "leavePartsOnError": { - "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", "default": false }, - "maxUploadParts": { - "description": "Maximum number of parts in a multipart upload.", - "type": "integer", - "default": 10000 + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", + "type": "string", + "default": "unset" }, - "namespace": { - "description": "Object storage namespace", - "type": "string" + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" }, - "noCheckBucket": { - "description": "If set, don't attempt to check the bucket exists or create it.", + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, - "region": { - "description": "Object storage Region", - "type": "string" - }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", - "type": "string", - "example": "" - }, - "sseCustomerKey": { - "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", - "type": "string", - "example": "" - }, - "sseCustomerKeyFile": { - "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "useUnsignedPayload": { + "description": 
"Whether to use an unsigned payload in PutObject", "type": "string", - "example": "" + "default": "unset" }, - "sseCustomerKeySha256": { - "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", "type": "string", - "example": "" + "default": "unset" }, - "sseKmsKeyId": { - "description": "if using your own master key in vault, this header specifies the", - "type": "string", - "example": "" + "v2Auth": { + "description": "If true use v2 authentication.", + "type": "boolean", + "default": false }, - "storageTier": { - "description": "The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "versionAt": { + "description": "Show file versions as they were at the specified time.", "type": "string", - "default": "Standard", - "example": "Standard" + "default": "off" }, - "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", - "type": "integer", - "default": 10 + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false }, - "uploadCutoff": { - "description": "Cutoff for switching to chunked upload.", - "type": "string", - "default": "200Mi" + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false } } }, - "storage.opendriveConfig": { + "storage.s3HetznerConfig": { "type": "object", "properties": { - "chunkSize": { - "description": "Files will be uploaded in chunks this size.", - "type": "string", - "default": "10Mi" + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" }, - "description": { - "description": "Description of the remote.", + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", "type": "string" }, - 
"encoding": { - "description": "The encoding for the backend.", + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", "type": "string", - "default": "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot" + "example": "private" }, - "password": { - "description": "Password.", - "type": "string" + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" }, - "username": { - "description": "Username.", - "type": "string" - } - } - }, - "storage.pcloudConfig": { - "type": "object", - "properties": { - "authUrl": { - "description": "Auth server URL.", - "type": "string" + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" }, - "clientId": { - "description": "OAuth Client Id.", + "decompress": { + "description": "If set this will decompress gzip encoded objects.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", "type": "string" }, - "clientSecret": { - "description": "OAuth Client Secret.", - "type": "string" + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false + }, + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", + "type": "boolean", + "default": false }, - "description": { - "description": "Description of the remote.", + "downloadUrl": { + "description": "Custom endpoint for downloads.", "type": "string" }, "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" + "default": "Slash,InvalidUtf8,Dot" }, - "hostname": { - "description": "Hostname to 
connect to.", + "endpoint": { + "description": "Endpoint for S3 API.", "type": "string", - "default": "api.pcloud.com", - "example": "api.pcloud.com" + "example": "hel1.your-objectstorage.com" }, - "password": { - "description": "Your pcloud password.", - "type": "string" + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false }, - "rootFolderId": { - "description": "Fill in for rclone to use a non root folder as its starting point.", - "type": "string", - "default": "d0" + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true }, - "token": { - "description": "OAuth Access Token as a JSON blob.", - "type": "string" + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 }, - "tokenUrl": { - "description": "Token server url.", - "type": "string" + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", + "type": "string", + "default": "unset" }, - "username": { - "description": "Your pcloud username.", - "type": "string" - } - } - }, - "storage.premiumizemeConfig": { - "type": "object", - "properties": { - "apiKey": { - "description": "API Key.", - "type": "string" + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 }, - "authUrl": { - "description": "Auth server URL.", + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", "type": "string" }, - "clientId": { - "description": "OAuth Client Id.", - "type": "string" + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "clientSecret": { - "description": "OAuth Client Secret.", - 
"type": "string" + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", + "type": "string", + "default": "1m0s" }, - "description": { - "description": "Description of the remote.", - "type": "string" + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", + "type": "boolean", + "default": false }, - "encoding": { - "description": "The encoding for the backend.", + "mightGzip": { + "description": "Set this if the backend might gzip objects.", "type": "string", - "default": "Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot" + "default": "unset" }, - "token": { - "description": "OAuth Access Token as a JSON blob.", - "type": "string" + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false }, - "tokenUrl": { - "description": "Token server url.", + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", + "type": "boolean", + "default": false + }, + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false + }, + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false + }, + "profile": { + "description": "Profile to use in the shared credentials file.", "type": "string" - } - } - }, - "storage.putioConfig": { - "type": "object", - "properties": { - "authUrl": { - "description": "Auth server URL.", + }, + "region": { + "description": "Region to connect to.", + "type": "string", + "example": "hel1" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", "type": "string" }, - "clientId": { - "description": "OAuth Client Id.", + "roleExternalId": { + "description": "External ID for assumed role.", "type": "string" }, - "clientSecret": { - "description": "OAuth Client 
Secret.", + "roleSessionDuration": { + "description": "Session duration for assumed role.", "type": "string" }, - "description": { - "description": "Description of the remote.", + "roleSessionName": { + "description": "Session name for assumed role.", "type": "string" }, - "encoding": { - "description": "The encoding for the backend.", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" + "default": "Off" }, - "token": { - "description": "OAuth Access Token as a JSON blob.", + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" }, - "tokenUrl": { - "description": "Token server url.", + "sessionToken": { + "description": "An AWS session token.", "type": "string" - } - } - }, - "storage.qingstorConfig": { - "type": "object", - "properties": { - "accessKeyId": { - "description": "QingStor Access Key ID.", + }, + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", "type": "string" }, - "chunkSize": { - "description": "Chunk size to use for uploading.", + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "default": "4Mi" + "default": "unset" }, - "connectionRetries": { - "description": "Number of connection retries.", + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", "type": "integer", - "default": 3 + "default": 4 }, - "description": { - "description": "Description of the remote.", - "type": "string" + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" }, - "encoding": { - "description": "The encoding for the backend.", + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", "type": "string", - "default": "Slash,Ctl,InvalidUtf8" + "default": "unset" }, - "endpoint": { - "description": 
"Enter an endpoint URL to connection QingStor API.", - "type": "string" + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" }, - "envAuth": { - "description": "Get QingStor credentials from runtime.", + "useArnRegion": { + "description": "If true, enables arn region support for the service.", "type": "boolean", - "default": false, - "example": false + "default": false }, - "secretAccessKey": { - "description": "QingStor Secret Access Key (password).", - "type": "string" + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" }, - "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", - "type": "integer", - "default": 1 + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false }, - "uploadCutoff": { - "description": "Cutoff for switching to chunked upload.", + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", "type": "string", - "default": "200Mi" + "default": "unset" + }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", + "type": "boolean", + "default": false + }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, + "v2Auth": { + "description": "If true use v2 authentication.", + "type": "boolean", + "default": false + }, + "versionAt": { + "description": "Show file versions as they were at the specified time.", 
+ "type": "string", + "default": "off" + }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false }, - "zone": { - "description": "Zone to connect to.", - "type": "string", - "example": "pek3a" + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false } } }, - "storage.s3AWSConfig": { + "storage.s3HuaweiOBSConfig": { "type": "object", "properties": { "accessKeyId": { @@ -12295,7 +17481,8 @@ }, "endpoint": { "description": "Endpoint for S3 API.", - "type": "string" + "type": "string", + "example": "obs.af-south-1.myhuaweicloud.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -12308,11 +17495,6 @@ "type": "boolean", "default": true }, - "leavePartsOnError": { - "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.", - "type": "boolean", - "default": false - }, "listChunk": { "description": "Size of listing chunk (response list for each ListObject S3 request).", "type": "integer", @@ -12328,11 +17510,6 @@ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must be set to match the Region.", - "type": "string", - "example": "" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -12380,12 +17557,23 @@ "region": { "description": "Region to connect to.", "type": "string", - "example": "us-east-1" + "example": "af-south-1" }, - "requesterPays": { - "description": "Enables requester pays option when interacting with S3 bucket.", - "type": "boolean", - "default": false + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + 
"roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -12396,11 +17584,6 @@ "description": "AWS Secret Access Key (password).", "type": "string" }, - "serverSideEncryption": { - "description": "The server-side encryption algorithm used when storing this object in S3.", - "type": "string", - "example": "" - }, "sessionToken": { "description": "An AWS session token.", "type": "string" @@ -12409,39 +17592,10 @@ "description": "Path to the shared credentials file.", "type": "string" }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", - "type": "string", - "example": "" - }, - "sseCustomerKey": { - "description": "To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.", - "type": "string", - "example": "" - }, - "sseCustomerKeyBase64": { - "description": "If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.", - "type": "string", - "example": "" - }, - "sseCustomerKeyMd5": { - "description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", - "type": "string", - "example": "" - }, - "sseKmsKeyId": { - "description": "If using KMS ID you must provide the ARN of Key.", - "type": "string", - "example": "" - }, - "storageClass": { - "description": "The storage class to use when storing new objects in S3.", + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "example": "" - }, - "stsEndpoint": { - "description": "Endpoint for STS (deprecated).", - "type": "string" + "default": "unset" }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", @@ 
-12453,11 +17607,6 @@ "type": "string", "default": "200Mi" }, - "useAccelerateEndpoint": { - "description": "If true use the AWS S3 accelerated endpoint.", - "type": "boolean", - "default": false - }, "useAcceptEncodingGzip": { "description": "Whether to send `Accept-Encoding: gzip` header.", "type": "string", @@ -12468,6 +17617,16 @@ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -12493,6 +17652,11 @@ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -12515,7 +17679,7 @@ } } }, - "storage.s3AlibabaConfig": { + "storage.s3IBMCOSConfig": { "type": "object", "properties": { "accessKeyId": { @@ -12524,7 +17688,8 @@ }, "acl": { "description": "Canned ACL used when creating buckets and storing or copying objects.", - "type": "string" + "type": "string", + "example": "private" }, "bucketAcl": { "description": "Canned ACL used when creating buckets.", @@ -12575,9 +17740,9 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for OSS API.", + "description": "Endpoint for S3 API.", "type": "string", - "example": "oss-accelerate.aliyuncs.com" + "example": "s3.us.cloud-object-storage.appdomain.cloud" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -12590,6 +17755,14 @@ "type": "boolean", "default": true }, + "ibmApiKey": { + "description": "IBM API Key to be used to obtain IAM token", + 
"type": "string" + }, + "ibmResourceInstanceId": { + "description": "IBM service instance id", + "type": "string" + }, "listChunk": { "description": "Size of listing chunk (response list for each ListObject S3 request).", "type": "integer", @@ -12605,6 +17778,11 @@ "type": "integer", "default": 0 }, + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", + "type": "string", + "example": "us-standard" + }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -12649,6 +17827,26 @@ "description": "Profile to use in the shared credentials file.", "type": "string" }, + "region": { + "description": "Region to connect to.", + "type": "string" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" + }, "sdkLogMode": { "description": "Set to debug the SDK", "type": "string", @@ -12666,10 +17864,10 @@ "description": "Path to the shared credentials file.", "type": "string" }, - "storageClass": { - "description": "The storage class to use when storing new objects in OSS.", + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "example": "" + "default": "unset" }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", @@ -12691,6 +17889,16 @@ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + 
"default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -12716,6 +17924,11 @@ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -12738,7 +17951,7 @@ } } }, - "storage.s3ArvanCloudConfig": { + "storage.s3IDriveConfig": { "type": "object", "properties": { "accessKeyId": { @@ -12797,11 +18010,6 @@ "type": "string", "default": "Slash,InvalidUtf8,Dot" }, - "endpoint": { - "description": "Endpoint for Arvan Cloud Object Storage (AOS) API.", - "type": "string", - "example": "s3.ir-thr-at1.arvanstorage.ir" - }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", "type": "boolean", @@ -12828,11 +18036,6 @@ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must match endpoint.", - "type": "string", - "example": "ir-thr-at1" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -12877,6 +18080,22 @@ "description": "Profile to use in the shared credentials file.", "type": "string" }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" + }, "sdkLogMode": { "description": "Set to debug the SDK", "type": "string", @@ -12894,10 +18113,10 @@ "description": "Path to the shared credentials file.", "type": "string" }, - "storageClass": { - "description": "The storage class to use when storing new objects 
in ArvanCloud.", + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "example": "STANDARD" + "default": "unset" }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", @@ -12919,6 +18138,16 @@ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -12944,6 +18173,11 @@ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -12966,7 +18200,7 @@ } } }, - "storage.s3CephConfig": { + "storage.s3IONOSConfig": { "type": "object", "properties": { "accessKeyId": { @@ -13027,7 +18261,8 @@ }, "endpoint": { "description": "Endpoint for S3 API.", - "type": "string" + "type": "string", + "example": "s3-eu-central-1.ionoscloud.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -13055,10 +18290,6 @@ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must be set to match the Region.", - "type": "string" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -13106,7 +18337,23 @@ "region": { "description": "Region to connect to.", "type": "string", - "example": "" + "example": "eu-central-2" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + 
"description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -13117,11 +18364,6 @@ "description": "AWS Secret Access Key (password).", "type": "string" }, - "serverSideEncryption": { - "description": "The server-side encryption algorithm used when storing this object in S3.", - "type": "string", - "example": "" - }, "sessionToken": { "description": "An AWS session token.", "type": "string" @@ -13130,30 +18372,10 @@ "description": "Path to the shared credentials file.", "type": "string" }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", - "type": "string", - "example": "" - }, - "sseCustomerKey": { - "description": "To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.", - "type": "string", - "example": "" - }, - "sseCustomerKeyBase64": { - "description": "If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.", - "type": "string", - "example": "" - }, - "sseCustomerKeyMd5": { - "description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", - "type": "string", - "example": "" - }, - "sseKmsKeyId": { - "description": "If using KMS ID you must provide the ARN of Key.", + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "example": "" + "default": "unset" }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", @@ -13175,6 +18397,16 @@ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + 
"type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -13200,6 +18432,11 @@ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -13222,7 +18459,7 @@ } } }, - "storage.s3ChinaMobileConfig": { + "storage.s3IntercoloConfig": { "type": "object", "properties": { "accessKeyId": { @@ -13282,9 +18519,9 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for China Mobile Ecloud Elastic Object Storage (EOS) API.", + "description": "Endpoint for S3 API.", "type": "string", - "example": "eos-wuxi-1.cmecloud.cn" + "example": "de-fra.i3storage.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -13312,11 +18549,6 @@ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must match endpoint.", - "type": "string", - "example": "wuxi1" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -13361,52 +18593,48 @@ "description": "Profile to use in the shared credentials file.", "type": "string" }, - "sdkLogMode": { - "description": "Set to debug the SDK", + "region": { + "description": "Region to connect to.", "type": "string", - "default": "Off" + "example": "de-fra" }, - "secretAccessKey": { - "description": "AWS Secret Access Key (password).", + "roleArn": { + "description": "ARN of the IAM role to assume.", "type": "string" }, - "serverSideEncryption": { - "description": "The server-side encryption algorithm used when storing 
this object in S3.", - "type": "string", - "example": "" + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" }, - "sessionToken": { - "description": "An AWS session token.", + "roleSessionDuration": { + "description": "Session duration for assumed role.", "type": "string" }, - "sharedCredentialsFile": { - "description": "Path to the shared credentials file.", + "roleSessionName": { + "description": "Session name for assumed role.", "type": "string" }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "example": "" + "default": "Off" }, - "sseCustomerKey": { - "description": "To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.", - "type": "string", - "example": "" + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", + "type": "string" }, - "sseCustomerKeyBase64": { - "description": "If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.", - "type": "string", - "example": "" + "sessionToken": { + "description": "An AWS session token.", + "type": "string" }, - "sseCustomerKeyMd5": { - "description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", - "type": "string", - "example": "" + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", + "type": "string" }, - "storageClass": { - "description": "The storage class to use when storing new objects in ChinaMobile.", + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "example": "" + "default": "unset" }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", @@ -13428,6 +18656,16 @@ "type": "string", "default": "unset" }, 
+ "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -13453,6 +18691,11 @@ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -13475,13 +18718,17 @@ } } }, - "storage.s3CloudflareConfig": { + "storage.s3LeviiaConfig": { "type": "object", "properties": { "accessKeyId": { "description": "AWS Access Key ID.", "type": "string" }, + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", + "type": "string" + }, "bucketAcl": { "description": "Canned ACL used when creating buckets.", "type": "string", @@ -13532,7 +18779,8 @@ }, "endpoint": { "description": "Endpoint for S3 API.", - "type": "string" + "type": "string", + "example": "s3.leviia.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -13606,8 +18854,23 @@ }, "region": { "description": "Region to connect to.", - "type": "string", - "example": "auto" + "type": "string" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -13626,6 +18889,11 @@ "description": "Path to 
the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -13646,6 +18914,16 @@ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -13671,6 +18949,11 @@ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -13693,7 +18976,7 @@ } } }, - "storage.s3DigitalOceanConfig": { + "storage.s3LiaraConfig": { "type": "object", "properties": { "accessKeyId": { @@ -13755,7 +19038,7 @@ "endpoint": { "description": "Endpoint for S3 API.", "type": "string", - "example": "syd1.digitaloceanspaces.com" + "example": "storage.iran.liara.space" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -13783,10 +19066,6 @@ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must be set to match the Region.", - "type": "string" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -13831,10 +19110,21 @@ "description": "Profile to use in the shared credentials file.", "type": "string" }, - "region": { - "description": "Region to connect to.", - "type": "string", - "example": "" + 
"roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -13853,6 +19143,15 @@ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, + "storageClass": { + "description": "The storage class to use when storing new objects in S3.", + "type": "string" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -13873,6 +19172,16 @@ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -13898,6 +19207,11 @@ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -13920,7 +19234,7 @@ } } }, - "storage.s3DreamhostConfig": { + "storage.s3LinodeConfig": { "type": "object", "properties": { "accessKeyId": { @@ -13982,7 +19296,7 @@ "endpoint": { "description": "Endpoint for S3 API.", "type": "string", - "example": "objects-us-east-1.dream.io" + "example": "nl-ams-1.linodeobjects.com" }, 
"envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -14010,10 +19324,6 @@ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must be set to match the Region.", - "type": "string" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -14058,10 +19368,21 @@ "description": "Profile to use in the shared credentials file.", "type": "string" }, - "region": { - "description": "Region to connect to.", - "type": "string", - "example": "" + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -14080,6 +19401,11 @@ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -14100,6 +19426,16 @@ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -14125,6 +19461,11 @@ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if 
rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -14147,7 +19488,7 @@ } } }, - "storage.s3GCSConfig": { + "storage.s3LyveCloudConfig": { "type": "object", "properties": { "accessKeyId": { @@ -14207,9 +19548,9 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Google Cloud Storage.", + "description": "Endpoint for S3 API.", "type": "string", - "example": "https://storage.googleapis.com" + "example": "s3.us-west-1.{account_name}.lyve.seagate.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -14287,8 +19628,23 @@ }, "region": { "description": "Region to connect to.", - "type": "string", - "example": "" + "type": "string" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -14307,6 +19663,11 @@ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -14327,6 +19688,16 @@ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": 
"string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -14352,6 +19723,11 @@ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -14374,7 +19750,7 @@ } } }, - "storage.s3HuaweiOBSConfig": { + "storage.s3MagaluConfig": { "type": "object", "properties": { "accessKeyId": { @@ -14434,9 +19810,9 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for OBS API.", + "description": "Endpoint for S3 API.", "type": "string", - "example": "obs.af-south-1.myhuaweicloud.com" + "example": "br-se1.magaluobjects.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -14508,10 +19884,21 @@ "description": "Profile to use in the shared credentials file.", "type": "string" }, - "region": { - "description": "Region to connect to. - the location where your bucket will be created and your data stored. 
Need bo be same with your endpoint.", - "type": "string", - "example": "af-south-1" + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -14530,6 +19917,15 @@ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, + "storageClass": { + "description": "The storage class to use when storing new objects in S3.", + "type": "string" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -14550,6 +19946,16 @@ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -14575,6 +19981,11 @@ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -14597,18 +20008,13 @@ } } }, - "storage.s3IBMCOSConfig": { + "storage.s3MegaConfig": { "type": "object", "properties": { "accessKeyId": { "description": "AWS Access Key ID.", "type": "string" }, - "acl": { - "description": "Canned ACL used when 
creating buckets and storing or copying objects.", - "type": "string", - "example": "private" - }, "bucketAcl": { "description": "Canned ACL used when creating buckets.", "type": "string", @@ -14658,9 +20064,9 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for IBM COS S3 API.", + "description": "Endpoint for S3 API.", "type": "string", - "example": "s3.us.cloud-object-storage.appdomain.cloud" + "example": "s3.eu-central-1.s4.mega.io" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -14688,11 +20094,6 @@ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must match endpoint when using IBM Cloud Public.", - "type": "string", - "example": "us-standard" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -14737,10 +20138,21 @@ "description": "Profile to use in the shared credentials file.", "type": "string" }, - "region": { - "description": "Region to connect to.", - "type": "string", - "example": "" + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -14759,6 +20171,11 @@ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -14779,6 +20196,16 @@ "type": "string", 
"default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -14804,6 +20231,11 @@ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -14826,7 +20258,7 @@ } } }, - "storage.s3IDriveConfig": { + "storage.s3MinioConfig": { "type": "object", "properties": { "accessKeyId": { @@ -14885,6 +20317,10 @@ "type": "string", "default": "Slash,InvalidUtf8,Dot" }, + "endpoint": { + "description": "Endpoint for S3 API.", + "type": "string" + }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", "type": "boolean", @@ -14911,6 +20347,10 @@ "type": "integer", "default": 0 }, + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", + "type": "string" + }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -14955,6 +20395,26 @@ "description": "Profile to use in the shared credentials file.", "type": "string" }, + "region": { + "description": "Region to connect to.", + "type": "string" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" 
+ }, "sdkLogMode": { "description": "Set to debug the SDK", "type": "string", @@ -14964,13 +20424,47 @@ "description": "AWS Secret Access Key (password).", "type": "string" }, - "sessionToken": { - "description": "An AWS session token.", - "type": "string" + "serverSideEncryption": { + "description": "The server-side encryption algorithm used when storing this object in S3.", + "type": "string" + }, + "sessionToken": { + "description": "An AWS session token.", + "type": "string" + }, + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", + "type": "string" + }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", + "type": "string", + "example": "" + }, + "sseCustomerKey": { + "description": "To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.", + "type": "string", + "example": "" + }, + "sseCustomerKeyBase64": { + "description": "If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.", + "type": "string", + "example": "" }, - "sharedCredentialsFile": { - "description": "Path to the shared credentials file.", - "type": "string" + "sseCustomerKeyMd5": { + "description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", + "type": "string", + "example": "" + }, + "sseKmsKeyId": { + "description": "If using KMS ID you must provide the ARN of Key.", + "type": "string", + "example": "" }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", @@ -14992,6 +20486,16 @@ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false 
+ }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -15017,6 +20521,11 @@ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -15039,7 +20548,7 @@ } } }, - "storage.s3IONOSConfig": { + "storage.s3NeteaseConfig": { "type": "object", "properties": { "accessKeyId": { @@ -15099,9 +20608,8 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for IONOS S3 Object Storage.", - "type": "string", - "example": "s3-eu-central-1.ionoscloud.com" + "description": "Endpoint for S3 API.", + "type": "string" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -15129,6 +20637,10 @@ "type": "integer", "default": 0 }, + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", + "type": "string" + }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -15174,9 +20686,24 @@ "type": "string" }, "region": { - "description": "Region where your bucket will be created and your data stored.", - "type": "string", - "example": "de" + "description": "Region to connect to.", + "type": "string" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": 
{ "description": "Set to debug the SDK", @@ -15195,6 +20722,11 @@ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -15215,6 +20747,16 @@ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -15240,6 +20782,11 @@ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -15262,7 +20809,7 @@ } } }, - "storage.s3LeviiaConfig": { + "storage.s3OVHcloudConfig": { "type": "object", "properties": { "accessKeyId": { @@ -15323,7 +20870,8 @@ }, "endpoint": { "description": "Endpoint for S3 API.", - "type": "string" + "type": "string", + "example": "s3.gra.io.cloud.ovh.net" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -15398,7 +20946,23 @@ "region": { "description": "Region to connect to.", "type": "string", - "example": "" + "example": "gra" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, 
+ "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -15417,6 +20981,11 @@ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -15437,6 +21006,16 @@ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -15462,6 +21041,11 @@ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -15484,7 +21068,7 @@ } } }, - "storage.s3LiaraConfig": { + "storage.s3OtherConfig": { "type": "object", "properties": { "accessKeyId": { @@ -15544,9 +21128,8 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Liara Object Storage API.", - "type": "string", - "example": "storage.iran.liara.space" + "description": "Endpoint for S3 API.", + "type": "string" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -15574,6 +21157,10 @@ "type": "integer", "default": 0 }, + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", + "type": "string" + }, "maxUploadParts": { "description": 
"Maximum number of parts in a multipart upload.", "type": "integer", @@ -15618,6 +21205,26 @@ "description": "Profile to use in the shared credentials file.", "type": "string" }, + "region": { + "description": "Region to connect to.", + "type": "string" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" + }, "sdkLogMode": { "description": "Set to debug the SDK", "type": "string", @@ -15635,10 +21242,10 @@ "description": "Path to the shared credentials file.", "type": "string" }, - "storageClass": { - "description": "The storage class to use when storing new objects in Liara", + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "example": "STANDARD" + "default": "unset" }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", @@ -15660,6 +21267,16 @@ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -15685,6 +21302,11 @@ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -15707,7 +21329,7 @@ } } }, - "storage.s3LinodeConfig": { + 
"storage.s3OutscaleConfig": { "type": "object", "properties": { "accessKeyId": { @@ -15767,9 +21389,9 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Linode Object Storage API.", + "description": "Endpoint for S3 API.", "type": "string", - "example": "us-southeast-1.linodeobjects.com" + "example": "oos.eu-west-2.outscale.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -15841,6 +21463,27 @@ "description": "Profile to use in the shared credentials file.", "type": "string" }, + "region": { + "description": "Region to connect to.", + "type": "string", + "example": "eu-west-2" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" + }, "sdkLogMode": { "description": "Set to debug the SDK", "type": "string", @@ -15858,6 +21501,11 @@ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -15878,6 +21526,16 @@ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": 
"boolean", @@ -15903,6 +21561,11 @@ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -15925,7 +21588,7 @@ } } }, - "storage.s3LyveCloudConfig": { + "storage.s3PetaboxConfig": { "type": "object", "properties": { "accessKeyId": { @@ -15987,7 +21650,7 @@ "endpoint": { "description": "Endpoint for S3 API.", "type": "string", - "example": "s3.us-east-1.lyvecloud.seagate.com" + "example": "s3.petabox.io" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -16015,10 +21678,6 @@ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must be set to match the Region.", - "type": "string" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -16066,7 +21725,23 @@ "region": { "description": "Region to connect to.", "type": "string", - "example": "" + "example": "eu-central-1" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -16085,6 +21760,11 @@ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -16105,6 +21785,16 @@ 
"type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -16130,6 +21820,11 @@ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -16152,7 +21847,7 @@ } } }, - "storage.s3MagaluConfig": { + "storage.s3QiniuConfig": { "type": "object", "properties": { "accessKeyId": { @@ -16214,7 +21909,7 @@ "endpoint": { "description": "Endpoint for S3 API.", "type": "string", - "example": "br-se1.magaluobjects.com" + "example": "s3-cn-east-1.qiniucs.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -16242,6 +21937,11 @@ "type": "integer", "default": 0 }, + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", + "type": "string", + "example": "cn-east-1" + }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -16286,6 +21986,27 @@ "description": "Profile to use in the shared credentials file.", "type": "string" }, + "region": { + "description": "Region to connect to.", + "type": "string", + "example": "cn-east-1" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + 
"description": "Session name for assumed role.", + "type": "string" + }, "sdkLogMode": { "description": "Set to debug the SDK", "type": "string", @@ -16303,10 +22024,15 @@ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "storageClass": { - "description": "The storage class to use when storing new objects in Magalu.", + "description": "The storage class to use when storing new objects in S3.", "type": "string", - "example": "STANDARD" + "example": "LINE" }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", @@ -16328,6 +22054,16 @@ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -16353,6 +22089,11 @@ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -16375,22 +22116,13 @@ } } }, - "storage.s3MinioConfig": { + "storage.s3RabataConfig": { "type": "object", "properties": { "accessKeyId": { "description": "AWS Access Key ID.", "type": "string" }, - "acl": { - "description": "Canned ACL used when creating buckets and storing or copying objects.", - "type": "string" - }, - "bucketAcl": { - "description": "Canned ACL used when creating buckets.", - "type": "string", - "example": "private" - }, "chunkSize": { "description": "Chunk size to use for uploading.", "type": 
"string", @@ -16436,7 +22168,8 @@ }, "endpoint": { "description": "Endpoint for S3 API.", - "type": "string" + "type": "string", + "example": "s3.us-east-1.rabata.io" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -16466,7 +22199,8 @@ }, "locationConstraint": { "description": "Location constraint - must be set to match the Region.", - "type": "string" + "type": "string", + "example": "us-east-1" }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", @@ -16515,7 +22249,23 @@ "region": { "description": "Region to connect to.", "type": "string", - "example": "" + "example": "eu-west-1" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -16526,11 +22276,6 @@ "description": "AWS Secret Access Key (password).", "type": "string" }, - "serverSideEncryption": { - "description": "The server-side encryption algorithm used when storing this object in S3.", - "type": "string", - "example": "" - }, "sessionToken": { "description": "An AWS session token.", "type": "string" @@ -16539,30 +22284,10 @@ "description": "Path to the shared credentials file.", "type": "string" }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", - "type": "string", - "example": "" - }, - "sseCustomerKey": { - "description": "To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.", - "type": "string", - "example": "" - }, - "sseCustomerKeyBase64": { - "description": "If using 
SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.", - "type": "string", - "example": "" - }, - "sseCustomerKeyMd5": { - "description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", - "type": "string", - "example": "" - }, - "sseKmsKeyId": { - "description": "If using KMS ID you must provide the ARN of Key.", + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "example": "" + "default": "unset" }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", @@ -16584,6 +22309,16 @@ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -16609,6 +22344,11 @@ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -16631,7 +22371,7 @@ } } }, - "storage.s3NeteaseConfig": { + "storage.s3RackCorpConfig": { "type": "object", "properties": { "accessKeyId": { @@ -16692,7 +22432,8 @@ }, "endpoint": { "description": "Endpoint for S3 API.", - "type": "string" + "type": "string", + "example": "s3.rackcorp.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -16722,7 +22463,8 @@ }, "locationConstraint": { "description": "Location constraint - must be set to match the Region.", - "type": "string" + "type": "string", + "example": 
"global" }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", @@ -16771,7 +22513,23 @@ "region": { "description": "Region to connect to.", "type": "string", - "example": "" + "example": "global" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -16790,6 +22548,11 @@ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -16810,6 +22573,16 @@ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -16835,6 +22608,11 @@ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -16857,22 +22635,13 @@ } } }, - "storage.s3OtherConfig": { + "storage.s3RcloneConfig": { "type": "object", "properties": { "accessKeyId": { "description": "AWS Access Key ID.", "type": "string" }, - "acl": { - 
"description": "Canned ACL used when creating buckets and storing or copying objects.", - "type": "string" - }, - "bucketAcl": { - "description": "Canned ACL used when creating buckets.", - "type": "string", - "example": "private" - }, "chunkSize": { "description": "Chunk size to use for uploading.", "type": "string", @@ -16946,10 +22715,6 @@ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must be set to match the Region.", - "type": "string" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -16994,10 +22759,21 @@ "description": "Profile to use in the shared credentials file.", "type": "string" }, - "region": { - "description": "Region to connect to.", - "type": "string", - "example": "" + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -17016,6 +22792,11 @@ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -17036,6 +22817,16 @@ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { 
"description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -17061,6 +22852,11 @@ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -17083,7 +22879,7 @@ } } }, - "storage.s3PetaboxConfig": { + "storage.s3ScalewayConfig": { "type": "object", "properties": { "accessKeyId": { @@ -17143,9 +22939,9 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Petabox S3 Object Storage.", + "description": "Endpoint for S3 API.", "type": "string", - "example": "s3.petabox.io" + "example": "s3.nl-ams.scw.cloud" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -17218,9 +23014,25 @@ "type": "string" }, "region": { - "description": "Region where your bucket will be created and your data stored.", + "description": "Region to connect to.", "type": "string", - "example": "us-east-1" + "example": "nl-ams" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -17239,6 +23051,16 @@ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, + "storageClass": { + "description": "The storage class to use when storing new objects in S3.", + "type": "string", + "example": "" + }, 
"uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -17259,6 +23081,16 @@ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -17284,6 +23116,11 @@ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -17306,7 +23143,7 @@ } } }, - "storage.s3QiniuConfig": { + "storage.s3SeaweedFSConfig": { "type": "object", "properties": { "accessKeyId": { @@ -17366,9 +23203,9 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Qiniu Object Storage.", + "description": "Endpoint for S3 API.", "type": "string", - "example": "s3-cn-east-1.qiniucs.com" + "example": "localhost:8333" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -17398,8 +23235,7 @@ }, "locationConstraint": { "description": "Location constraint - must be set to match the Region.", - "type": "string", - "example": "cn-east-1" + "type": "string" }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", @@ -17447,8 +23283,23 @@ }, "region": { "description": "Region to connect to.", - "type": "string", - "example": "cn-east-1" + "type": "string" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + 
"roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -17467,10 +23318,10 @@ "description": "Path to the shared credentials file.", "type": "string" }, - "storageClass": { - "description": "The storage class to use when storing new objects in Qiniu.", + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "example": "STANDARD" + "default": "unset" }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", @@ -17492,6 +23343,16 @@ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -17517,6 +23378,11 @@ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -17539,22 +23405,13 @@ } } }, - "storage.s3RackCorpConfig": { + "storage.s3SelectelConfig": { "type": "object", "properties": { "accessKeyId": { "description": "AWS Access Key ID.", "type": "string" }, - "acl": { - "description": "Canned ACL used when creating buckets and storing or copying objects.", - "type": "string" - }, - "bucketAcl": { - "description": "Canned ACL used when creating buckets.", - "type": "string", - "example": "private" - }, "chunkSize": { "description": "Chunk size to use for uploading.", "type": "string", @@ 
-17599,9 +23456,9 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for RackCorp Object Storage.", + "description": "Endpoint for S3 API.", "type": "string", - "example": "s3.rackcorp.com" + "example": "s3.ru-1.storage.selcloud.ru" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -17629,11 +23486,6 @@ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - the location where your bucket will be located and your data stored.", - "type": "string", - "example": "global" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -17679,9 +23531,25 @@ "type": "string" }, "region": { - "description": "region - the location where your bucket will be created and your data stored.", + "description": "Region to connect to.", "type": "string", - "example": "global" + "example": "ru-3" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -17700,6 +23568,11 @@ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -17720,6 +23593,16 @@ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + 
"default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -17745,6 +23628,11 @@ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -17767,17 +23655,13 @@ } } }, - "storage.s3RcloneConfig": { + "storage.s3ServercoreConfig": { "type": "object", "properties": { "accessKeyId": { "description": "AWS Access Key ID.", "type": "string" }, - "acl": { - "description": "Canned ACL used when creating buckets and storing or copying objects.", - "type": "string" - }, "bucketAcl": { "description": "Canned ACL used when creating buckets.", "type": "string", @@ -17828,7 +23712,8 @@ }, "endpoint": { "description": "Endpoint for S3 API.", - "type": "string" + "type": "string", + "example": "s3.ru-1.storage.selcloud.ru" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -17856,10 +23741,6 @@ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must be set to match the Region.", - "type": "string" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -17907,7 +23788,23 @@ "region": { "description": "Region to connect to.", "type": "string", - "example": "" + "example": "uz-2" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": 
{ + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -17926,6 +23823,11 @@ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -17946,6 +23848,16 @@ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -17971,6 +23883,11 @@ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -17993,22 +23910,13 @@ } } }, - "storage.s3ScalewayConfig": { + "storage.s3SpectraLogicConfig": { "type": "object", "properties": { "accessKeyId": { "description": "AWS Access Key ID.", "type": "string" }, - "acl": { - "description": "Canned ACL used when creating buckets and storing or copying objects.", - "type": "string" - }, - "bucketAcl": { - "description": "Canned ACL used when creating buckets.", - "type": "string", - "example": "private" - }, "chunkSize": { "description": "Chunk size to use for uploading.", "type": "string", @@ -18053,9 +23961,8 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Scaleway Object Storage.", - "type": "string", - "example": "s3.nl-ams.scw.cloud" + "description": "Endpoint for S3 
API.", + "type": "string" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -18127,10 +24034,21 @@ "description": "Profile to use in the shared credentials file.", "type": "string" }, - "region": { - "description": "Region to connect to.", - "type": "string", - "example": "nl-ams" + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -18149,10 +24067,10 @@ "description": "Path to the shared credentials file.", "type": "string" }, - "storageClass": { - "description": "The storage class to use when storing new objects in S3.", + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "example": "" + "default": "unset" }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", @@ -18174,6 +24092,16 @@ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -18199,6 +24127,11 @@ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", 
@@ -18221,7 +24154,7 @@ } } }, - "storage.s3SeaweedFSConfig": { + "storage.s3StackPathConfig": { "type": "object", "properties": { "accessKeyId": { @@ -18283,7 +24216,7 @@ "endpoint": { "description": "Endpoint for S3 API.", "type": "string", - "example": "localhost:8333" + "example": "s3.us-east-2.stackpathstorage.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -18311,10 +24244,6 @@ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must be set to match the Region.", - "type": "string" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -18361,8 +24290,23 @@ }, "region": { "description": "Region to connect to.", - "type": "string", - "example": "" + "type": "string" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -18381,6 +24325,11 @@ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -18401,6 +24350,16 @@ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + 
"type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -18426,6 +24385,11 @@ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -18448,22 +24412,13 @@ } } }, - "storage.s3StackPathConfig": { + "storage.s3StorjConfig": { "type": "object", "properties": { "accessKeyId": { "description": "AWS Access Key ID.", "type": "string" }, - "acl": { - "description": "Canned ACL used when creating buckets and storing or copying objects.", - "type": "string" - }, - "bucketAcl": { - "description": "Canned ACL used when creating buckets.", - "type": "string", - "example": "private" - }, "chunkSize": { "description": "Chunk size to use for uploading.", "type": "string", @@ -18508,9 +24463,9 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for StackPath Object Storage.", + "description": "Endpoint for S3 API.", "type": "string", - "example": "s3.us-east-2.stackpathstorage.com" + "example": "gateway.storjshare.io" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -18582,10 +24537,21 @@ "description": "Profile to use in the shared credentials file.", "type": "string" }, - "region": { - "description": "Region to connect to.", - "type": "string", - "example": "" + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to 
debug the SDK", @@ -18604,6 +24570,11 @@ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -18624,6 +24595,16 @@ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -18649,6 +24630,11 @@ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -18671,18 +24657,13 @@ } } }, - "storage.s3StorjConfig": { + "storage.s3SynologyConfig": { "type": "object", "properties": { "accessKeyId": { "description": "AWS Access Key ID.", "type": "string" }, - "bucketAcl": { - "description": "Canned ACL used when creating buckets.", - "type": "string", - "example": "private" - }, "chunkSize": { "description": "Chunk size to use for uploading.", "type": "string", @@ -18727,9 +24708,9 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Storj Gateway.", + "description": "Endpoint for S3 API.", "type": "string", - "example": "gateway.storjshare.io" + "example": "eu-001.s3.synologyc2.net" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -18757,6 +24738,10 @@ "type": "integer", "default": 0 }, + "locationConstraint": 
{ + "description": "Location constraint - must be set to match the Region.", + "type": "string" + }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -18801,6 +24786,27 @@ "description": "Profile to use in the shared credentials file.", "type": "string" }, + "region": { + "description": "Region to connect to.", + "type": "string", + "example": "eu-001" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" + }, "sdkLogMode": { "description": "Set to debug the SDK", "type": "string", @@ -18818,6 +24824,11 @@ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -18838,6 +24849,16 @@ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -18863,6 +24884,11 @@ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -18885,13 
+24911,18 @@ } } }, - "storage.s3SynologyConfig": { + "storage.s3TencentCOSConfig": { "type": "object", "properties": { "accessKeyId": { "description": "AWS Access Key ID.", "type": "string" }, + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", + "type": "string", + "example": "default" + }, "bucketAcl": { "description": "Canned ACL used when creating buckets.", "type": "string", @@ -18941,9 +24972,9 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Synology C2 Object Storage API.", + "description": "Endpoint for S3 API.", "type": "string", - "example": "eu-001.s3.synologyc2.net" + "example": "cos.ap-beijing.myqcloud.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -18971,10 +25002,6 @@ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must be set to match the Region.", - "type": "string" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -19019,10 +25046,21 @@ "description": "Profile to use in the shared credentials file.", "type": "string" }, - "region": { - "description": "Region where your data stored.", - "type": "string", - "example": "eu-001" + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -19041,6 +25079,16 @@ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the 
signature.", + "type": "string", + "default": "unset" + }, + "storageClass": { + "description": "The storage class to use when storing new objects in S3.", + "type": "string", + "example": "ARCHIVE" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -19061,6 +25109,16 @@ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -19086,6 +25144,11 @@ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -19108,7 +25171,7 @@ } } }, - "storage.s3TencentCOSConfig": { + "storage.s3WasabiConfig": { "type": "object", "properties": { "accessKeyId": { @@ -19117,8 +25180,7 @@ }, "acl": { "description": "Canned ACL used when creating buckets and storing or copying objects.", - "type": "string", - "example": "default" + "type": "string" }, "bucketAcl": { "description": "Canned ACL used when creating buckets.", @@ -19169,9 +25231,9 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Tencent COS API.", + "description": "Endpoint for S3 API.", "type": "string", - "example": "cos.ap-beijing.myqcloud.com" + "example": "s3.wasabisys.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -19199,6 +25261,10 @@ "type": "integer", "default": 0 }, + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", + 
"type": "string" + }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", @@ -19243,6 +25309,26 @@ "description": "Profile to use in the shared credentials file.", "type": "string" }, + "region": { + "description": "Region to connect to.", + "type": "string" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" + }, "sdkLogMode": { "description": "Set to debug the SDK", "type": "string", @@ -19260,10 +25346,10 @@ "description": "Path to the shared credentials file.", "type": "string" }, - "storageClass": { - "description": "The storage class to use when storing new objects in Tencent COS.", + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", "type": "string", - "example": "" + "default": "unset" }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", @@ -19285,6 +25371,16 @@ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -19310,6 +25406,11 @@ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -19332,7 
+25433,7 @@ } } }, - "storage.s3WasabiConfig": { + "storage.s3ZataConfig": { "type": "object", "properties": { "accessKeyId": { @@ -19394,7 +25495,7 @@ "endpoint": { "description": "Endpoint for S3 API.", "type": "string", - "example": "s3.wasabisys.com" + "example": "idr01.zata.ai" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -19473,7 +25574,23 @@ "region": { "description": "Region to connect to.", "type": "string", - "example": "" + "example": "us-east-1" + }, + "roleArn": { + "description": "ARN of the IAM role to assume.", + "type": "string" + }, + "roleExternalId": { + "description": "External ID for assumed role.", + "type": "string" + }, + "roleSessionDuration": { + "description": "Session duration for assumed role.", + "type": "string" + }, + "roleSessionName": { + "description": "Session name for assumed role.", + "type": "string" }, "sdkLogMode": { "description": "Set to debug the SDK", @@ -19492,6 +25609,11 @@ "description": "Path to the shared credentials file.", "type": "string" }, + "signAcceptEncoding": { + "description": "Set if rclone should include Accept-Encoding as part of the signature.", + "type": "string", + "default": "unset" + }, "uploadConcurrency": { "description": "Concurrency for multipart uploads and copies.", "type": "integer", @@ -19512,6 +25634,16 @@ "type": "string", "default": "unset" }, + "useArnRegion": { + "description": "If true, enables arn region support for the service.", + "type": "boolean", + "default": false + }, + "useDataIntegrityProtections": { + "description": "If true use AWS S3 data integrity protections.", + "type": "string", + "default": "unset" + }, "useDualStack": { "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", @@ -19537,6 +25669,11 @@ "type": "string", "default": "unset" }, + "useXId": { + "description": "Set if rclone should add x-id URL parameters.", + "type": "string", + 
"default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -19583,7 +25720,7 @@ "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": "Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8" + "default": "Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8,Dot" }, "library": { "description": "Name of the library.", @@ -19616,6 +25753,10 @@ "type": "boolean", "default": false }, + "blake3sumCommand": { + "description": "The command used to read BLAKE3 hashes.", + "type": "string" + }, "chunkSize": { "description": "Upload and download chunk size.", "type": "string", @@ -19640,6 +25781,10 @@ "type": "boolean", "default": false }, + "crc32sumCommand": { + "description": "The command used to read CRC-32 hashes.", + "type": "string" + }, "description": { "description": "Description of the remote.", "type": "string" @@ -19659,6 +25804,10 @@ "type": "boolean", "default": false }, + "hashes": { + "description": "Comma separated list of supported checksum types.", + "type": "string" + }, "host": { "description": "SSH host to connect to.", "type": "string" @@ -19667,6 +25816,10 @@ "description": "Space separated list of host key algorithms, ordered by preference.", "type": "string" }, + "httpProxy": { + "description": "URL for HTTP CONNECT proxy", + "type": "string" + }, "idleTimeout": { "description": "Max time before closing idle connections.", "type": "string", @@ -19703,7 +25856,7 @@ "type": "string" }, "md5sumCommand": { - "description": "The command used to read md5 hashes.", + "description": "The command used to read MD5 hashes.", "type": "string" }, "pass": { @@ -19719,6 +25872,10 @@ "type": "integer", "default": 22 }, + "pubkey": { + "description": "SSH public certificate for public certificate based authentication.", + "type": "string" + }, "pubkeyFile": { "description": "Optional path to public key file.", "type": "string" @@ -19737,7 +25894,11 @@ "default": true }, "sha1sumCommand": { - 
"description": "The command used to read sha1 hashes.", + "description": "The command used to read SHA-1 hashes.", + "type": "string" + }, + "sha256sumCommand": { + "description": "The command used to read SHA-256 hashes.", "type": "string" }, "shellType": { @@ -19778,6 +25939,14 @@ "description": "SSH username.", "type": "string", "default": "$USER" + }, + "xxh128sumCommand": { + "description": "The command used to read XXH128 hashes.", + "type": "string" + }, + "xxh3sumCommand": { + "description": "The command used to read XXH3 hashes.", + "type": "string" } } }, @@ -19793,6 +25962,11 @@ "type": "string", "default": "64Mi" }, + "clientCredentials": { + "description": "Use client credentials OAuth flow.", + "type": "boolean", + "default": false + }, "clientId": { "description": "OAuth Client Id.", "type": "string" @@ -19898,6 +26072,10 @@ "type": "string", "default": "1m0s" }, + "kerberosCcache": { + "description": "Path to the Kerberos credential cache (krb5cc).", + "type": "string" + }, "pass": { "description": "SMB password.", "type": "string" @@ -19911,6 +26089,11 @@ "description": "Service principal name.", "type": "string" }, + "useKerberos": { + "description": "Use Kerberos authentication.", + "type": "boolean", + "default": false + }, "user": { "description": "SMB username.", "type": "string", @@ -20175,32 +26358,14 @@ } } }, - "storage.uptoboxConfig": { + "storage.webdavConfig": { "type": "object", "properties": { - "accessToken": { - "description": "Your access token.", - "type": "string" - }, - "description": { - "description": "Description of the remote.", - "type": "string" - }, - "encoding": { - "description": "The encoding for the backend.", - "type": "string", - "default": "Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot" - }, - "private": { - "description": "Set to make uploaded files private", + "authRedirect": { + "description": "Preserve authentication on redirect.", "type": "boolean", "default": false - } - } - }, - 
"storage.webdavConfig": { - "type": "object", - "properties": { + }, "bearerToken": { "description": "Bearer token instead of user/pass (e.g. a Macaroon).", "type": "string" @@ -20271,6 +26436,11 @@ "description": "Auth server URL.", "type": "string" }, + "clientCredentials": { + "description": "Use client credentials OAuth flow.", + "type": "boolean", + "default": false + }, "clientId": { "description": "OAuth Client Id.", "type": "string" @@ -20315,6 +26485,11 @@ "description": "Auth server URL.", "type": "string" }, + "clientCredentials": { + "description": "Use client credentials OAuth flow.", + "type": "boolean", + "default": false + }, "clientId": { "description": "OAuth Client Id.", "type": "string" @@ -20344,6 +26519,11 @@ "tokenUrl": { "description": "Token server url.", "type": "string" + }, + "uploadCutoff": { + "description": "Cutoff for switching to large file upload api (\u003e= 10 MiB).", + "type": "string", + "default": "10Mi" } } }, diff --git a/docs/swagger/swagger.yaml b/docs/swagger/swagger.yaml index 032c4aa59..04dbbc8f7 100644 --- a/docs/swagger/swagger.yaml +++ b/docs/swagger/swagger.yaml @@ -966,6 +966,17 @@ definitions: default: false description: Send the certificate chain when using certificate auth. type: boolean + connectionString: + description: Storage Connection String. + type: string + copyConcurrency: + default: 512 + description: Concurrency for multipart copy. + type: integer + copyCutoff: + default: 8Mi + description: Cutoff for switching to multipart copy. + type: string deleteSnapshots: description: Set to specify how to deal with snapshots on blob deletion. example: "" @@ -982,6 +993,10 @@ definitions: default: false description: Don't store MD5 checksum with object metadata. type: boolean + disableInstanceDiscovery: + default: false + description: Skip requesting Microsoft Entra instance metadata + type: boolean encoding: default: Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8 description: The encoding for the backend. 
@@ -1053,6 +1068,15 @@ definitions: uploadCutoff: description: Cutoff for switching to chunked upload (<= 256 MiB) (deprecated). type: string + useAz: + default: false + description: Use Azure CLI tool az for authentication + type: boolean + useCopyBlob: + default: true + description: Whether to use the Copy Blob API when copying to the same storage + account. + type: boolean useEmulator: default: false description: Uses local storage emulator if provided as 'true'. @@ -1123,6 +1147,26 @@ definitions: description: Whether to use mmap buffers in internal memory pool. (no longer used) type: boolean + sseCustomerAlgorithm: + description: If using SSE-C, the server-side encryption algorithm used when + storing this object in B2. + example: "" + type: string + sseCustomerKey: + description: To use SSE-C, you may provide the secret encryption key encoded + in a UTF-8 compatible string to encrypt/decrypt your data + example: "" + type: string + sseCustomerKeyBase64: + description: To use SSE-C, you may provide the secret encryption key encoded + in Base64 format to encrypt/decrypt your data + example: "" + type: string + sseCustomerKeyMd5: + description: If using SSE-C you may provide the secret encryption key MD5 + checksum (optional). + example: "" + type: string testMode: description: A flag string for X-Bz-Test-Mode header for debugging. type: string @@ -1158,6 +1202,10 @@ definitions: default: user example: user type: string + clientCredentials: + default: false + description: Use client credentials OAuth flow. + type: boolean clientId: description: OAuth Client Id. type: string @@ -1168,6 +1216,9 @@ definitions: default: 100 description: Max number of times to try committing a multipart file. type: integer + configCredentials: + description: Box App config.json contents. + type: string description: description: Description of the remote. 
type: string @@ -1435,22 +1486,6 @@ definitions: type: string type: object storage.createInternetarchiveStorageRequest: - properties: - clientConfig: - allOf: - - $ref: '#/definitions/model.ClientConfig' - description: config for underlying HTTP client - config: - allOf: - - $ref: '#/definitions/storage.internetarchiveConfig' - description: config for the storage - name: - description: Name of the storage, must be unique - example: my-storage - type: string - path: - description: Path of the storage - type: string type: object storage.createJottacloudStorageRequest: properties: @@ -1866,6 +1901,24 @@ definitions: description: Path of the storage type: string type: object + storage.createS3BizflyCloudStorageRequest: + properties: + clientConfig: + allOf: + - $ref: '#/definitions/model.ClientConfig' + description: config for underlying HTTP client + config: + allOf: + - $ref: '#/definitions/storage.s3BizflyCloudConfig' + description: config for the storage + name: + description: Name of the storage, must be unique + example: my-storage + type: string + path: + description: Path of the storage + type: string + type: object storage.createS3CephStorageRequest: properties: clientConfig: @@ -1920,6 +1973,24 @@ definitions: description: Path of the storage type: string type: object + storage.createS3CubbitStorageRequest: + properties: + clientConfig: + allOf: + - $ref: '#/definitions/model.ClientConfig' + description: config for underlying HTTP client + config: + allOf: + - $ref: '#/definitions/storage.s3CubbitConfig' + description: config for the storage + name: + description: Name of the storage, must be unique + example: my-storage + type: string + path: + description: Path of the storage + type: string + type: object storage.createS3DigitalOceanStorageRequest: properties: clientConfig: @@ -1956,6 +2027,60 @@ definitions: description: Path of the storage type: string type: object + storage.createS3ExabaStorageRequest: + properties: + clientConfig: + allOf: + - $ref: 
'#/definitions/model.ClientConfig' + description: config for underlying HTTP client + config: + allOf: + - $ref: '#/definitions/storage.s3ExabaConfig' + description: config for the storage + name: + description: Name of the storage, must be unique + example: my-storage + type: string + path: + description: Path of the storage + type: string + type: object + storage.createS3FileLuStorageRequest: + properties: + clientConfig: + allOf: + - $ref: '#/definitions/model.ClientConfig' + description: config for underlying HTTP client + config: + allOf: + - $ref: '#/definitions/storage.s3FileLuConfig' + description: config for the storage + name: + description: Name of the storage, must be unique + example: my-storage + type: string + path: + description: Path of the storage + type: string + type: object + storage.createS3FlashBladeStorageRequest: + properties: + clientConfig: + allOf: + - $ref: '#/definitions/model.ClientConfig' + description: config for underlying HTTP client + config: + allOf: + - $ref: '#/definitions/storage.s3FlashBladeConfig' + description: config for the storage + name: + description: Name of the storage, must be unique + example: my-storage + type: string + path: + description: Path of the storage + type: string + type: object storage.createS3GCSStorageRequest: properties: clientConfig: @@ -1974,6 +2099,24 @@ definitions: description: Path of the storage type: string type: object + storage.createS3HetznerStorageRequest: + properties: + clientConfig: + allOf: + - $ref: '#/definitions/model.ClientConfig' + description: config for underlying HTTP client + config: + allOf: + - $ref: '#/definitions/storage.s3HetznerConfig' + description: config for the storage + name: + description: Name of the storage, must be unique + example: my-storage + type: string + path: + description: Path of the storage + type: string + type: object storage.createS3HuaweiOBSStorageRequest: properties: clientConfig: @@ -2046,6 +2189,24 @@ definitions: description: Path of the 
storage type: string type: object + storage.createS3IntercoloStorageRequest: + properties: + clientConfig: + allOf: + - $ref: '#/definitions/model.ClientConfig' + description: config for underlying HTTP client + config: + allOf: + - $ref: '#/definitions/storage.s3IntercoloConfig' + description: config for the storage + name: + description: Name of the storage, must be unique + example: my-storage + type: string + path: + description: Path of the storage + type: string + type: object storage.createS3LeviiaStorageRequest: properties: clientConfig: @@ -2136,7 +2297,7 @@ definitions: description: Path of the storage type: string type: object - storage.createS3MinioStorageRequest: + storage.createS3MegaStorageRequest: properties: clientConfig: allOf: @@ -2144,7 +2305,7 @@ definitions: description: config for underlying HTTP client config: allOf: - - $ref: '#/definitions/storage.s3MinioConfig' + - $ref: '#/definitions/storage.s3MegaConfig' description: config for the storage name: description: Name of the storage, must be unique @@ -2154,7 +2315,7 @@ definitions: description: Path of the storage type: string type: object - storage.createS3NeteaseStorageRequest: + storage.createS3MinioStorageRequest: properties: clientConfig: allOf: @@ -2162,7 +2323,7 @@ definitions: description: config for underlying HTTP client config: allOf: - - $ref: '#/definitions/storage.s3NeteaseConfig' + - $ref: '#/definitions/storage.s3MinioConfig' description: config for the storage name: description: Name of the storage, must be unique @@ -2172,7 +2333,7 @@ definitions: description: Path of the storage type: string type: object - storage.createS3OtherStorageRequest: + storage.createS3NeteaseStorageRequest: properties: clientConfig: allOf: @@ -2180,7 +2341,7 @@ definitions: description: config for underlying HTTP client config: allOf: - - $ref: '#/definitions/storage.s3OtherConfig' + - $ref: '#/definitions/storage.s3NeteaseConfig' description: config for the storage name: description: Name of 
the storage, must be unique @@ -2190,7 +2351,7 @@ definitions: description: Path of the storage type: string type: object - storage.createS3PetaboxStorageRequest: + storage.createS3OVHcloudStorageRequest: properties: clientConfig: allOf: @@ -2198,7 +2359,7 @@ definitions: description: config for underlying HTTP client config: allOf: - - $ref: '#/definitions/storage.s3PetaboxConfig' + - $ref: '#/definitions/storage.s3OVHcloudConfig' description: config for the storage name: description: Name of the storage, must be unique @@ -2208,7 +2369,7 @@ definitions: description: Path of the storage type: string type: object - storage.createS3QiniuStorageRequest: + storage.createS3OtherStorageRequest: properties: clientConfig: allOf: @@ -2216,7 +2377,7 @@ definitions: description: config for underlying HTTP client config: allOf: - - $ref: '#/definitions/storage.s3QiniuConfig' + - $ref: '#/definitions/storage.s3OtherConfig' description: config for the storage name: description: Name of the storage, must be unique @@ -2226,7 +2387,7 @@ definitions: description: Path of the storage type: string type: object - storage.createS3RackCorpStorageRequest: + storage.createS3OutscaleStorageRequest: properties: clientConfig: allOf: @@ -2234,7 +2395,7 @@ definitions: description: config for underlying HTTP client config: allOf: - - $ref: '#/definitions/storage.s3RackCorpConfig' + - $ref: '#/definitions/storage.s3OutscaleConfig' description: config for the storage name: description: Name of the storage, must be unique @@ -2244,7 +2405,7 @@ definitions: description: Path of the storage type: string type: object - storage.createS3RcloneStorageRequest: + storage.createS3PetaboxStorageRequest: properties: clientConfig: allOf: @@ -2252,7 +2413,7 @@ definitions: description: config for underlying HTTP client config: allOf: - - $ref: '#/definitions/storage.s3RcloneConfig' + - $ref: '#/definitions/storage.s3PetaboxConfig' description: config for the storage name: description: Name of the storage, 
must be unique @@ -2262,7 +2423,7 @@ definitions: description: Path of the storage type: string type: object - storage.createS3ScalewayStorageRequest: + storage.createS3QiniuStorageRequest: properties: clientConfig: allOf: @@ -2270,7 +2431,7 @@ definitions: description: config for underlying HTTP client config: allOf: - - $ref: '#/definitions/storage.s3ScalewayConfig' + - $ref: '#/definitions/storage.s3QiniuConfig' description: config for the storage name: description: Name of the storage, must be unique @@ -2280,7 +2441,7 @@ definitions: description: Path of the storage type: string type: object - storage.createS3SeaweedFSStorageRequest: + storage.createS3RabataStorageRequest: properties: clientConfig: allOf: @@ -2288,7 +2449,7 @@ definitions: description: config for underlying HTTP client config: allOf: - - $ref: '#/definitions/storage.s3SeaweedFSConfig' + - $ref: '#/definitions/storage.s3RabataConfig' description: config for the storage name: description: Name of the storage, must be unique @@ -2298,7 +2459,7 @@ definitions: description: Path of the storage type: string type: object - storage.createS3StackPathStorageRequest: + storage.createS3RackCorpStorageRequest: properties: clientConfig: allOf: @@ -2306,7 +2467,7 @@ definitions: description: config for underlying HTTP client config: allOf: - - $ref: '#/definitions/storage.s3StackPathConfig' + - $ref: '#/definitions/storage.s3RackCorpConfig' description: config for the storage name: description: Name of the storage, must be unique @@ -2316,7 +2477,7 @@ definitions: description: Path of the storage type: string type: object - storage.createS3StorjStorageRequest: + storage.createS3RcloneStorageRequest: properties: clientConfig: allOf: @@ -2324,7 +2485,7 @@ definitions: description: config for underlying HTTP client config: allOf: - - $ref: '#/definitions/storage.s3StorjConfig' + - $ref: '#/definitions/storage.s3RcloneConfig' description: config for the storage name: description: Name of the storage, must be 
unique @@ -2334,7 +2495,7 @@ definitions: description: Path of the storage type: string type: object - storage.createS3SynologyStorageRequest: + storage.createS3ScalewayStorageRequest: properties: clientConfig: allOf: @@ -2342,7 +2503,7 @@ definitions: description: config for underlying HTTP client config: allOf: - - $ref: '#/definitions/storage.s3SynologyConfig' + - $ref: '#/definitions/storage.s3ScalewayConfig' description: config for the storage name: description: Name of the storage, must be unique @@ -2352,7 +2513,7 @@ definitions: description: Path of the storage type: string type: object - storage.createS3TencentCOSStorageRequest: + storage.createS3SeaweedFSStorageRequest: properties: clientConfig: allOf: @@ -2360,7 +2521,7 @@ definitions: description: config for underlying HTTP client config: allOf: - - $ref: '#/definitions/storage.s3TencentCOSConfig' + - $ref: '#/definitions/storage.s3SeaweedFSConfig' description: config for the storage name: description: Name of the storage, must be unique @@ -2370,7 +2531,7 @@ definitions: description: Path of the storage type: string type: object - storage.createS3WasabiStorageRequest: + storage.createS3SelectelStorageRequest: properties: clientConfig: allOf: @@ -2378,7 +2539,7 @@ definitions: description: config for underlying HTTP client config: allOf: - - $ref: '#/definitions/storage.s3WasabiConfig' + - $ref: '#/definitions/storage.s3SelectelConfig' description: config for the storage name: description: Name of the storage, must be unique @@ -2388,7 +2549,7 @@ definitions: description: Path of the storage type: string type: object - storage.createSeafileStorageRequest: + storage.createS3ServercoreStorageRequest: properties: clientConfig: allOf: @@ -2396,7 +2557,7 @@ definitions: description: config for underlying HTTP client config: allOf: - - $ref: '#/definitions/storage.seafileConfig' + - $ref: '#/definitions/storage.s3ServercoreConfig' description: config for the storage name: description: Name of the storage, 
must be unique @@ -2406,7 +2567,7 @@ definitions: description: Path of the storage type: string type: object - storage.createSftpStorageRequest: + storage.createS3SpectraLogicStorageRequest: properties: clientConfig: allOf: @@ -2414,7 +2575,7 @@ definitions: description: config for underlying HTTP client config: allOf: - - $ref: '#/definitions/storage.sftpConfig' + - $ref: '#/definitions/storage.s3SpectraLogicConfig' description: config for the storage name: description: Name of the storage, must be unique @@ -2424,7 +2585,7 @@ definitions: description: Path of the storage type: string type: object - storage.createSharefileStorageRequest: + storage.createS3StackPathStorageRequest: properties: clientConfig: allOf: @@ -2432,7 +2593,7 @@ definitions: description: config for underlying HTTP client config: allOf: - - $ref: '#/definitions/storage.sharefileConfig' + - $ref: '#/definitions/storage.s3StackPathConfig' description: config for the storage name: description: Name of the storage, must be unique @@ -2442,7 +2603,7 @@ definitions: description: Path of the storage type: string type: object - storage.createSiaStorageRequest: + storage.createS3StorjStorageRequest: properties: clientConfig: allOf: @@ -2450,7 +2611,7 @@ definitions: description: config for underlying HTTP client config: allOf: - - $ref: '#/definitions/storage.siaConfig' + - $ref: '#/definitions/storage.s3StorjConfig' description: config for the storage name: description: Name of the storage, must be unique @@ -2460,7 +2621,7 @@ definitions: description: Path of the storage type: string type: object - storage.createSmbStorageRequest: + storage.createS3SynologyStorageRequest: properties: clientConfig: allOf: @@ -2468,7 +2629,7 @@ definitions: description: config for underlying HTTP client config: allOf: - - $ref: '#/definitions/storage.smbConfig' + - $ref: '#/definitions/storage.s3SynologyConfig' description: config for the storage name: description: Name of the storage, must be unique @@ -2478,7 +2639,7 
@@ definitions: description: Path of the storage type: string type: object - storage.createStorjExistingStorageRequest: + storage.createS3TencentCOSStorageRequest: properties: clientConfig: allOf: @@ -2486,7 +2647,7 @@ definitions: description: config for underlying HTTP client config: allOf: - - $ref: '#/definitions/storage.storjExistingConfig' + - $ref: '#/definitions/storage.s3TencentCOSConfig' description: config for the storage name: description: Name of the storage, must be unique @@ -2496,7 +2657,7 @@ definitions: description: Path of the storage type: string type: object - storage.createStorjNewStorageRequest: + storage.createS3WasabiStorageRequest: properties: clientConfig: allOf: @@ -2504,7 +2665,7 @@ definitions: description: config for underlying HTTP client config: allOf: - - $ref: '#/definitions/storage.storjNewConfig' + - $ref: '#/definitions/storage.s3WasabiConfig' description: config for the storage name: description: Name of the storage, must be unique @@ -2514,7 +2675,7 @@ definitions: description: Path of the storage type: string type: object - storage.createSugarsyncStorageRequest: + storage.createS3ZataStorageRequest: properties: clientConfig: allOf: @@ -2522,7 +2683,7 @@ definitions: description: config for underlying HTTP client config: allOf: - - $ref: '#/definitions/storage.sugarsyncConfig' + - $ref: '#/definitions/storage.s3ZataConfig' description: config for the storage name: description: Name of the storage, must be unique @@ -2532,7 +2693,7 @@ definitions: description: Path of the storage type: string type: object - storage.createSwiftStorageRequest: + storage.createSeafileStorageRequest: properties: clientConfig: allOf: @@ -2540,7 +2701,7 @@ definitions: description: config for underlying HTTP client config: allOf: - - $ref: '#/definitions/storage.swiftConfig' + - $ref: '#/definitions/storage.seafileConfig' description: config for the storage name: description: Name of the storage, must be unique @@ -2550,7 +2711,7 @@ definitions: 
description: Path of the storage type: string type: object - storage.createUnionStorageRequest: + storage.createSftpStorageRequest: properties: clientConfig: allOf: @@ -2558,7 +2719,7 @@ definitions: description: config for underlying HTTP client config: allOf: - - $ref: '#/definitions/storage.unionConfig' + - $ref: '#/definitions/storage.sftpConfig' description: config for the storage name: description: Name of the storage, must be unique @@ -2568,7 +2729,7 @@ definitions: description: Path of the storage type: string type: object - storage.createUptoboxStorageRequest: + storage.createSharefileStorageRequest: properties: clientConfig: allOf: @@ -2576,7 +2737,7 @@ definitions: description: config for underlying HTTP client config: allOf: - - $ref: '#/definitions/storage.uptoboxConfig' + - $ref: '#/definitions/storage.sharefileConfig' description: config for the storage name: description: Name of the storage, must be unique @@ -2586,7 +2747,7 @@ definitions: description: Path of the storage type: string type: object - storage.createWebdavStorageRequest: + storage.createSiaStorageRequest: properties: clientConfig: allOf: @@ -2594,7 +2755,7 @@ definitions: description: config for underlying HTTP client config: allOf: - - $ref: '#/definitions/storage.webdavConfig' + - $ref: '#/definitions/storage.siaConfig' description: config for the storage name: description: Name of the storage, must be unique @@ -2604,7 +2765,7 @@ definitions: description: Path of the storage type: string type: object - storage.createYandexStorageRequest: + storage.createSmbStorageRequest: properties: clientConfig: allOf: @@ -2612,7 +2773,7 @@ definitions: description: config for underlying HTTP client config: allOf: - - $ref: '#/definitions/storage.yandexConfig' + - $ref: '#/definitions/storage.smbConfig' description: config for the storage name: description: Name of the storage, must be unique @@ -2622,7 +2783,7 @@ definitions: description: Path of the storage type: string type: object - 
storage.createZohoStorageRequest: + storage.createStorjExistingStorageRequest: properties: clientConfig: allOf: @@ -2630,7 +2791,7 @@ definitions: description: config for underlying HTTP client config: allOf: - - $ref: '#/definitions/storage.zohoConfig' + - $ref: '#/definitions/storage.storjExistingConfig' description: config for the storage name: description: Name of the storage, must be unique @@ -2640,11 +2801,137 @@ definitions: description: Path of the storage type: string type: object - storage.driveConfig: + storage.createStorjNewStorageRequest: properties: - acknowledgeAbuse: - default: false - description: Set to allow files which return cannotDownloadAbusiveFile to + clientConfig: + allOf: + - $ref: '#/definitions/model.ClientConfig' + description: config for underlying HTTP client + config: + allOf: + - $ref: '#/definitions/storage.storjNewConfig' + description: config for the storage + name: + description: Name of the storage, must be unique + example: my-storage + type: string + path: + description: Path of the storage + type: string + type: object + storage.createSugarsyncStorageRequest: + properties: + clientConfig: + allOf: + - $ref: '#/definitions/model.ClientConfig' + description: config for underlying HTTP client + config: + allOf: + - $ref: '#/definitions/storage.sugarsyncConfig' + description: config for the storage + name: + description: Name of the storage, must be unique + example: my-storage + type: string + path: + description: Path of the storage + type: string + type: object + storage.createSwiftStorageRequest: + properties: + clientConfig: + allOf: + - $ref: '#/definitions/model.ClientConfig' + description: config for underlying HTTP client + config: + allOf: + - $ref: '#/definitions/storage.swiftConfig' + description: config for the storage + name: + description: Name of the storage, must be unique + example: my-storage + type: string + path: + description: Path of the storage + type: string + type: object + 
storage.createUnionStorageRequest: + properties: + clientConfig: + allOf: + - $ref: '#/definitions/model.ClientConfig' + description: config for underlying HTTP client + config: + allOf: + - $ref: '#/definitions/storage.unionConfig' + description: config for the storage + name: + description: Name of the storage, must be unique + example: my-storage + type: string + path: + description: Path of the storage + type: string + type: object + storage.createWebdavStorageRequest: + properties: + clientConfig: + allOf: + - $ref: '#/definitions/model.ClientConfig' + description: config for underlying HTTP client + config: + allOf: + - $ref: '#/definitions/storage.webdavConfig' + description: config for the storage + name: + description: Name of the storage, must be unique + example: my-storage + type: string + path: + description: Path of the storage + type: string + type: object + storage.createYandexStorageRequest: + properties: + clientConfig: + allOf: + - $ref: '#/definitions/model.ClientConfig' + description: config for underlying HTTP client + config: + allOf: + - $ref: '#/definitions/storage.yandexConfig' + description: config for the storage + name: + description: Name of the storage, must be unique + example: my-storage + type: string + path: + description: Path of the storage + type: string + type: object + storage.createZohoStorageRequest: + properties: + clientConfig: + allOf: + - $ref: '#/definitions/model.ClientConfig' + description: config for underlying HTTP client + config: + allOf: + - $ref: '#/definitions/storage.zohoConfig' + description: config for the storage + name: + description: Name of the storage, must be unique + example: my-storage + type: string + path: + description: Path of the storage + type: string + type: object + storage.driveConfig: + properties: + acknowledgeAbuse: + default: false + description: Set to allow files which return cannotDownloadAbusiveFile to be downloaded. 
type: boolean allowImportNameChange: @@ -2666,6 +2953,10 @@ definitions: default: 8Mi description: Upload chunk size. type: string + clientCredentials: + default: false + description: Use client credentials OAuth flow. + type: boolean clientId: description: Google Application Client Id type: string @@ -2720,6 +3011,10 @@ definitions: default: 1000 description: Size of listing chunk 100-1000, 0 to disable. type: integer + metadataEnforceExpansiveAccess: + default: false + description: Whether the request should enforce expansive access rules. + type: boolean metadataLabels: default: "off" description: Control whether labels should be read or written in metadata. @@ -2845,7 +3140,8 @@ definitions: type: string batchCommitTimeout: default: 10m0s - description: Max time to wait for a batch to finish committing + description: Max time to wait for a batch to finish committing. (no longer + used) type: string batchMode: default: sync @@ -2863,6 +3159,10 @@ definitions: default: 48Mi description: Upload chunk size (< 150Mi). type: string + clientCredentials: + default: false + description: Use client credentials OAuth flow. + type: boolean clientId: description: OAuth Client Id. type: string @@ -2876,6 +3176,10 @@ definitions: default: Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot description: The encoding for the backend. type: string + exportFormats: + default: html,md + description: Comma separated list of preferred formats for exporting files + type: string impersonate: description: Impersonate this user when using a business account. type: string @@ -2895,6 +3199,14 @@ definitions: default: false description: Instructs rclone to work on shared folders. type: boolean + showAllExports: + default: false + description: Show all exportable files in listings. + type: boolean + skipExports: + default: false + description: Skip exportable files in all listings. + type: boolean token: description: OAuth Access Token as a JSON blob. 
type: string @@ -2961,6 +3273,10 @@ definitions: type: object storage.ftpConfig: properties: + allowInsecureTlsCiphers: + default: false + description: Allow insecure TLS ciphers + type: boolean askPassword: default: false description: Allow asking for FTP password when needed. @@ -3009,6 +3325,9 @@ definitions: host: description: FTP host to connect to. type: string + httpProxy: + description: URL for HTTP CONNECT proxy + type: string idleTimeout: default: 1m0s description: Max time before closing idle connections. @@ -3017,6 +3336,10 @@ definitions: default: false description: Do not verify the TLS certificate of the server. type: boolean + noCheckUpload: + default: false + description: Don't check the upload is OK + type: boolean pass: description: FTP password. type: string @@ -3050,6 +3373,9 @@ definitions: type: object storage.gcsConfig: properties: + accessToken: + description: Short-lived access token. + type: string anonymous: default: false description: Access public buckets and objects without credentials. @@ -3065,6 +3391,10 @@ definitions: default: false description: Access checks should use bucket-level IAM policies. type: boolean + clientCredentials: + default: false + description: Use client credentials OAuth flow. + type: boolean clientId: description: OAuth Client Id. type: string @@ -3088,7 +3418,9 @@ definitions: description: The encoding for the backend. type: string endpoint: - description: Endpoint for the service. + description: Custom endpoint for the storage API. Leave blank to use the provider + default. + example: storage.example.org type: string envAuth: default: false @@ -3139,7 +3471,8 @@ definitions: type: string batchCommitTimeout: default: 10m0s - description: Max time to wait for a batch to finish committing + description: Max time to wait for a batch to finish committing. 
(no longer + used) type: string batchMode: default: sync @@ -3153,6 +3486,10 @@ definitions: default: 0s description: Max time to allow an idle upload batch before uploading. type: string + clientCredentials: + default: false + description: Use client credentials OAuth flow. + type: boolean clientId: description: OAuth Client Id. type: string @@ -3170,6 +3507,9 @@ definitions: default: false description: Also view and download archived media. type: boolean + proxy: + description: Use the gphotosdl proxy for downloading the full resolution images + type: string readOnly: default: false description: Set to make the Google Photos backend read only. @@ -3223,6 +3563,10 @@ definitions: default: 48Mi description: Chunksize for chunked uploads. type: string + clientCredentials: + default: false + description: Use client credentials OAuth flow. + type: boolean clientId: description: OAuth Client Id. type: string @@ -3301,45 +3645,15 @@ definitions: description: URL of HTTP host to connect to. type: string type: object - storage.internetarchiveConfig: - properties: - accessKeyId: - description: IAS3 Access Key. - type: string - description: - description: Description of the remote. - type: string - disableChecksum: - default: true - description: Don't ask the server to test against MD5 checksum calculated - by rclone. - type: boolean - encoding: - default: Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot - description: The encoding for the backend. - type: string - endpoint: - default: https://s3.us.archive.org - description: IAS3 Endpoint. - type: string - frontEndpoint: - default: https://archive.org - description: Host of InternetArchive Frontend. - type: string - secretAccessKey: - description: IAS3 Secret Key (password). - type: string - waitArchive: - default: 0s - description: Timeout for waiting the server's processing tasks (specifically - archive and book_op) to finish. 
- type: string - type: object storage.jottacloudConfig: properties: authUrl: description: Auth server URL. type: string + clientCredentials: + default: false + description: Use client credentials OAuth flow. + type: boolean clientId: description: OAuth Client Id. type: string @@ -3476,10 +3790,13 @@ definitions: default: Slash,Dot description: The encoding for the backend. type: string + hashes: + description: Comma separated list of supported checksum types. + type: string links: default: false description: Translate symlinks to/from regular files with a '.rclonelink' - extension. + extension for the local backend. type: boolean noCheckUpdated: default: false @@ -3514,6 +3831,10 @@ definitions: default: false description: Don't warn about skipped symlinks. type: boolean + skipSpecials: + default: false + description: Don't warn about skipped pipes, sockets and device objects. + type: boolean timeType: default: mtime description: Set what kind of time is returned. @@ -3539,6 +3860,10 @@ definitions: description: What should copy do if file checksum is mismatched or invalid. example: true type: boolean + clientCredentials: + default: false + description: Use client credentials OAuth flow. + type: boolean clientId: description: OAuth Client Id. type: string @@ -3596,6 +3921,10 @@ definitions: type: object storage.megaConfig: properties: + 2fa: + description: The 2FA code of your MEGA account if the account is set up with + one + type: string debug: default: false description: Output more debug from Mega. @@ -3611,9 +3940,15 @@ definitions: default: false description: Delete files permanently rather than putting them into the trash. type: boolean + masterKey: + description: Master key (internal use only) + type: string pass: description: Password. type: string + sessionId: + description: Session (internal use only) + type: string useHttps: default: false description: Use HTTPS for transfers. 
@@ -3663,6 +3998,10 @@ definitions: description: Chunk size to upload files with - must be multiple of 320k (327,680 bytes). type: string + clientCredentials: + default: false + description: Use client credentials OAuth flow. + type: boolean clientId: description: OAuth Client Id. type: string @@ -3741,12 +4080,20 @@ definitions: default: false description: 'Deprecated: use --server-side-across-configs instead.' type: boolean + tenant: + description: ID of the service principal's tenant. Also called its directory + ID. + type: string token: description: OAuth Access Token as a JSON blob. type: string tokenUrl: description: Token server url. type: string + uploadCutoff: + default: "off" + description: Cutoff for switching to chunked upload. + type: string type: object storage.oosEnv_authConfig: properties: @@ -3760,7 +4107,7 @@ definitions: description: Chunk size to use for uploading. type: string compartment: - description: Object storage compartment OCID + description: Specify compartment OCID, if you need to list buckets. type: string copyCutoff: default: 4.656Gi @@ -3855,7 +4202,7 @@ definitions: description: Chunk size to use for uploading. type: string compartment: - description: Object storage compartment OCID + description: Specify compartment OCID, if you need to list buckets. type: string copyCutoff: default: 4.656Gi @@ -4042,7 +4389,7 @@ definitions: description: Chunk size to use for uploading. type: string compartment: - description: Object storage compartment OCID + description: Specify compartment OCID, if you need to list buckets. type: string copyCutoff: default: 4.656Gi @@ -4137,7 +4484,7 @@ definitions: description: Chunk size to use for uploading. type: string compartment: - description: Object storage compartment OCID + description: Specify compartment OCID, if you need to list buckets. type: string configFile: default: ~/.oci/config @@ -4242,7 +4589,7 @@ definitions: description: Chunk size to use for uploading. 
type: string compartment: - description: Object storage compartment OCID + description: Specify compartment OCID, if you need to list buckets. type: string copyCutoff: default: 4.656Gi @@ -4327,6 +4674,12 @@ definitions: type: object storage.opendriveConfig: properties: + access: + default: private + description: Files and folders will be uploaded with this access permission + (default private) + example: private + type: string chunkSize: default: 10Mi description: Files will be uploaded in chunks this size. @@ -4350,6 +4703,10 @@ definitions: authUrl: description: Auth server URL. type: string + clientCredentials: + default: false + description: Use client credentials OAuth flow. + type: boolean clientId: description: OAuth Client Id. type: string @@ -4393,6 +4750,10 @@ definitions: authUrl: description: Auth server URL. type: string + clientCredentials: + default: false + description: Use client credentials OAuth flow. + type: boolean clientId: description: OAuth Client Id. type: string @@ -4418,6 +4779,10 @@ definitions: authUrl: description: Auth server URL. type: string + clientCredentials: + default: false + description: Use client credentials OAuth flow. + type: boolean clientId: description: OAuth Client Id. type: string @@ -4510,6 +4875,10 @@ definitions: description: description: Description of the remote. type: string + directoryBucket: + default: false + description: Set to use AWS Directory Buckets + type: boolean directoryMarkers: default: false description: Upload an empty object with a trailing slash when a new directory @@ -4609,6 +4978,18 @@ definitions: default: false description: Enables requester pays option when interacting with S3 bucket. type: boolean + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. 
+ type: string + roleSessionName: + description: Session name for assumed role. + type: string sdkLogMode: default: "Off" description: Set to debug the SDK @@ -4619,7 +5000,6 @@ definitions: serverSideEncryption: description: The server-side encryption algorithm used when storing this object in S3. - example: "" type: string sessionToken: description: An AWS session token. @@ -4627,6 +5007,10 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string sseCustomerAlgorithm: description: If using SSE-C, the server-side encryption algorithm used when storing this object in S3. @@ -4653,7 +5037,7 @@ definitions: type: string storageClass: description: The storage class to use when storing new objects in S3. - example: "" + example: REDUCED_REDUNDANCY type: string stsEndpoint: description: Endpoint for STS (deprecated). @@ -4679,6 +5063,14 @@ definitions: description: Set if rclone should report BucketAlreadyExists errors on bucket creation. type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string useDualStack: default: false description: If true use AWS S3 dual-stack endpoint (IPv6 support). @@ -4700,6 +5092,10 @@ definitions: default: unset description: Whether to use an unsigned payload in PutObject type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string v2Auth: default: false description: If true use v2 authentication. @@ -4766,7 +5162,7 @@ definitions: description: The encoding for the backend. type: string endpoint: - description: Endpoint for OSS API. + description: Endpoint for S3 API. 
example: oss-accelerate.aliyuncs.com type: string envAuth: @@ -4828,6 +5224,18 @@ definitions: profile: description: Profile to use in the shared credentials file. type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. + type: string sdkLogMode: default: "Off" description: Set to debug the SDK @@ -4841,9 +5249,12 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string storageClass: - description: The storage class to use when storing new objects in OSS. - example: "" + description: The storage class to use when storing new objects in S3. type: string uploadConcurrency: default: 4 @@ -4862,6 +5273,14 @@ definitions: description: Set if rclone should report BucketAlreadyExists errors on bucket creation. type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string useDualStack: default: false description: If true use AWS S3 dual-stack endpoint (IPv6 support). @@ -4883,6 +5302,10 @@ definitions: default: unset description: Whether to use an unsigned payload in PutObject type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string v2Auth: default: false description: If true use v2 authentication. @@ -4949,7 +5372,7 @@ definitions: description: The encoding for the backend. type: string endpoint: - description: Endpoint for Arvan Cloud Object Storage (AOS) API. 
+ description: Endpoint for S3 API. example: s3.ir-thr-at1.arvanstorage.ir type: string envAuth: @@ -4975,7 +5398,7 @@ definitions: description: 'Version of ListObjects to use: 1,2 or 0 for auto.' type: integer locationConstraint: - description: Location constraint - must match endpoint. + description: Location constraint - must be set to match the Region. example: ir-thr-at1 type: string maxUploadParts: @@ -5015,6 +5438,18 @@ definitions: profile: description: Profile to use in the shared credentials file. type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. + type: string sdkLogMode: default: "Off" description: Set to debug the SDK @@ -5028,9 +5463,12 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string storageClass: - description: The storage class to use when storing new objects in ArvanCloud. - example: STANDARD + description: The storage class to use when storing new objects in S3. type: string uploadConcurrency: default: 4 @@ -5049,6 +5487,14 @@ definitions: description: Set if rclone should report BucketAlreadyExists errors on bucket creation. type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string useDualStack: default: false description: If true use AWS S3 dual-stack endpoint (IPv6 support). 
@@ -5070,6 +5516,10 @@ definitions: default: unset description: Whether to use an unsigned payload in PutObject type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string v2Auth: default: false description: If true use v2 authentication. @@ -5087,7 +5537,7 @@ definitions: description: Include old versions in directory listings. type: boolean type: object - storage.s3CephConfig: + storage.s3BizflyCloudConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -5137,6 +5587,7 @@ definitions: type: string endpoint: description: Endpoint for S3 API. + example: hn.ss.bfcplatform.vn type: string envAuth: default: false @@ -5160,9 +5611,6 @@ definitions: default: 0 description: 'Version of ListObjects to use: 1,2 or 0 for auto.' type: integer - locationConstraint: - description: Location constraint - must be set to match the Region. - type: string maxUploadParts: default: 10000 description: Maximum number of parts in a multipart upload. @@ -5202,7 +5650,19 @@ definitions: type: string region: description: Region to connect to. - example: "" + example: hn + type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. type: string sdkLogMode: default: "Off" @@ -5211,40 +5671,15 @@ definitions: secretAccessKey: description: AWS Secret Access Key (password). type: string - serverSideEncryption: - description: The server-side encryption algorithm used when storing this object - in S3. - example: "" - type: string sessionToken: description: An AWS session token. type: string sharedCredentialsFile: description: Path to the shared credentials file. 
type: string - sseCustomerAlgorithm: - description: If using SSE-C, the server-side encryption algorithm used when - storing this object in S3. - example: "" - type: string - sseCustomerKey: - description: To use SSE-C you may provide the secret encryption key used to - encrypt/decrypt your data. - example: "" - type: string - sseCustomerKeyBase64: - description: If using SSE-C you must provide the secret encryption key encoded - in base64 format to encrypt/decrypt your data. - example: "" - type: string - sseCustomerKeyMd5: - description: If using SSE-C you may provide the secret encryption key MD5 - checksum (optional). - example: "" - type: string - sseKmsKeyId: - description: If using KMS ID you must provide the ARN of Key. - example: "" + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. type: string uploadConcurrency: default: 4 @@ -5263,6 +5698,14 @@ definitions: description: Set if rclone should report BucketAlreadyExists errors on bucket creation. type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string useDualStack: default: false description: If true use AWS S3 dual-stack endpoint (IPv6 support). @@ -5284,6 +5727,10 @@ definitions: default: unset description: Whether to use an unsigned payload in PutObject type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string v2Auth: default: false description: If true use v2 authentication. @@ -5301,7 +5748,7 @@ definitions: description: Include old versions in directory listings. type: boolean type: object - storage.s3ChinaMobileConfig: + storage.s3CephConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -5350,9 +5797,7 @@ definitions: description: The encoding for the backend. 
type: string endpoint: - description: Endpoint for China Mobile Ecloud Elastic Object Storage (EOS) - API. - example: eos-wuxi-1.cmecloud.cn + description: Endpoint for S3 API. type: string envAuth: default: false @@ -5377,8 +5822,7 @@ definitions: description: 'Version of ListObjects to use: 1,2 or 0 for auto.' type: integer locationConstraint: - description: Location constraint - must match endpoint. - example: wuxi1 + description: Location constraint - must be set to match the Region. type: string maxUploadParts: default: 10000 @@ -5417,6 +5861,21 @@ definitions: profile: description: Profile to use in the shared credentials file. type: string + region: + description: Region to connect to. + type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. + type: string sdkLogMode: default: "Off" description: Set to debug the SDK @@ -5427,7 +5886,6 @@ definitions: serverSideEncryption: description: The server-side encryption algorithm used when storing this object in S3. - example: "" type: string sessionToken: description: An AWS session token. @@ -5435,6 +5893,10 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string sseCustomerAlgorithm: description: If using SSE-C, the server-side encryption algorithm used when storing this object in S3. @@ -5455,8 +5917,8 @@ definitions: checksum (optional). example: "" type: string - storageClass: - description: The storage class to use when storing new objects in ChinaMobile. + sseKmsKeyId: + description: If using KMS ID you must provide the ARN of Key. 
example: "" type: string uploadConcurrency: @@ -5476,6 +5938,14 @@ definitions: description: Set if rclone should report BucketAlreadyExists errors on bucket creation. type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string useDualStack: default: false description: If true use AWS S3 dual-stack endpoint (IPv6 support). @@ -5497,6 +5967,10 @@ definitions: default: unset description: Whether to use an unsigned payload in PutObject type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string v2Auth: default: false description: If true use v2 authentication. @@ -5514,11 +5988,15 @@ definitions: description: Include old versions in directory listings. type: boolean type: object - storage.s3CloudflareConfig: + storage.s3ChinaMobileConfig: properties: accessKeyId: description: AWS Access Key ID. type: string + acl: + description: Canned ACL used when creating buckets and storing or copying + objects. + type: string bucketAcl: description: Canned ACL used when creating buckets. example: private @@ -5560,6 +6038,7 @@ definitions: type: string endpoint: description: Endpoint for S3 API. + example: eos-wuxi-1.cmecloud.cn type: string envAuth: default: false @@ -5583,6 +6062,10 @@ definitions: default: 0 description: 'Version of ListObjects to use: 1,2 or 0 for auto.' type: integer + locationConstraint: + description: Location constraint - must be set to match the Region. + example: wuxi1 + type: string maxUploadParts: default: 10000 description: Maximum number of parts in a multipart upload. @@ -5620,9 +6103,17 @@ definitions: profile: description: Profile to use in the shared credentials file. type: string - region: - description: Region to connect to. - example: auto + roleArn: + description: ARN of the IAM role to assume. 
+ type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. type: string sdkLogMode: default: "Off" @@ -5631,12 +6122,43 @@ definitions: secretAccessKey: description: AWS Secret Access Key (password). type: string + serverSideEncryption: + description: The server-side encryption algorithm used when storing this object + in S3. + type: string sessionToken: description: An AWS session token. type: string sharedCredentialsFile: description: Path to the shared credentials file. type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string + sseCustomerAlgorithm: + description: If using SSE-C, the server-side encryption algorithm used when + storing this object in S3. + example: "" + type: string + sseCustomerKey: + description: To use SSE-C you may provide the secret encryption key used to + encrypt/decrypt your data. + example: "" + type: string + sseCustomerKeyBase64: + description: If using SSE-C you must provide the secret encryption key encoded + in base64 format to encrypt/decrypt your data. + example: "" + type: string + sseCustomerKeyMd5: + description: If using SSE-C you may provide the secret encryption key MD5 + checksum (optional). + example: "" + type: string + storageClass: + description: The storage class to use when storing new objects in S3. + type: string uploadConcurrency: default: 4 description: Concurrency for multipart uploads and copies. @@ -5654,6 +6176,14 @@ definitions: description: Set if rclone should report BucketAlreadyExists errors on bucket creation. type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. 
+ type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string useDualStack: default: false description: If true use AWS S3 dual-stack endpoint (IPv6 support). @@ -5675,6 +6205,10 @@ definitions: default: unset description: Whether to use an unsigned payload in PutObject type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string v2Auth: default: false description: If true use v2 authentication. @@ -5692,19 +6226,11 @@ definitions: description: Include old versions in directory listings. type: boolean type: object - storage.s3DigitalOceanConfig: + storage.s3CloudflareConfig: properties: accessKeyId: description: AWS Access Key ID. type: string - acl: - description: Canned ACL used when creating buckets and storing or copying - objects. - type: string - bucketAcl: - description: Canned ACL used when creating buckets. - example: private - type: string chunkSize: default: 5Mi description: Chunk size to use for uploading. @@ -5742,7 +6268,6 @@ definitions: type: string endpoint: description: Endpoint for S3 API. - example: syd1.digitaloceanspaces.com type: string envAuth: default: false @@ -5766,9 +6291,6 @@ definitions: default: 0 description: 'Version of ListObjects to use: 1,2 or 0 for auto.' type: integer - locationConstraint: - description: Location constraint - must be set to match the Region. - type: string maxUploadParts: default: 10000 description: Maximum number of parts in a multipart upload. @@ -5808,7 +6330,19 @@ definitions: type: string region: description: Region to connect to. - example: "" + example: auto + type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. 
type: string sdkLogMode: default: "Off" @@ -5823,6 +6357,10 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string uploadConcurrency: default: 4 description: Concurrency for multipart uploads and copies. @@ -5840,6 +6378,14 @@ definitions: description: Set if rclone should report BucketAlreadyExists errors on bucket creation. type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string useDualStack: default: false description: If true use AWS S3 dual-stack endpoint (IPv6 support). @@ -5861,6 +6407,10 @@ definitions: default: unset description: Whether to use an unsigned payload in PutObject type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string v2Auth: default: false description: If true use v2 authentication. @@ -5878,7 +6428,7 @@ definitions: description: Include old versions in directory listings. type: boolean type: object - storage.s3DreamhostConfig: + storage.s3CubbitConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -5928,7 +6478,7 @@ definitions: type: string endpoint: description: Endpoint for S3 API. - example: objects-us-east-1.dream.io + example: s3.cubbit.eu type: string envAuth: default: false @@ -5952,9 +6502,6 @@ definitions: default: 0 description: 'Version of ListObjects to use: 1,2 or 0 for auto.' type: integer - locationConstraint: - description: Location constraint - must be set to match the Region. - type: string maxUploadParts: default: 10000 description: Maximum number of parts in a multipart upload. @@ -5994,7 +6541,19 @@ definitions: type: string region: description: Region to connect to. 
- example: "" + example: eu-west-1 + type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. type: string sdkLogMode: default: "Off" @@ -6009,6 +6568,10 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string uploadConcurrency: default: 4 description: Concurrency for multipart uploads and copies. @@ -6026,6 +6589,14 @@ definitions: description: Set if rclone should report BucketAlreadyExists errors on bucket creation. type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string useDualStack: default: false description: If true use AWS S3 dual-stack endpoint (IPv6 support). @@ -6047,6 +6618,10 @@ definitions: default: unset description: Whether to use an unsigned payload in PutObject type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string v2Auth: default: false description: If true use v2 authentication. @@ -6064,7 +6639,7 @@ definitions: description: Include old versions in directory listings. type: boolean type: object - storage.s3GCSConfig: + storage.s3DigitalOceanConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -6113,8 +6688,8 @@ definitions: description: The encoding for the backend. type: string endpoint: - description: Endpoint for Google Cloud Storage. - example: https://storage.googleapis.com + description: Endpoint for S3 API. 
+ example: syd1.digitaloceanspaces.com type: string envAuth: default: false @@ -6180,7 +6755,18 @@ definitions: type: string region: description: Region to connect to. - example: "" + type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. type: string sdkLogMode: default: "Off" @@ -6195,6 +6781,10 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string uploadConcurrency: default: 4 description: Concurrency for multipart uploads and copies. @@ -6212,6 +6802,14 @@ definitions: description: Set if rclone should report BucketAlreadyExists errors on bucket creation. type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string useDualStack: default: false description: If true use AWS S3 dual-stack endpoint (IPv6 support). @@ -6233,6 +6831,10 @@ definitions: default: unset description: Whether to use an unsigned payload in PutObject type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string v2Auth: default: false description: If true use v2 authentication. @@ -6250,7 +6852,7 @@ definitions: description: Include old versions in directory listings. type: boolean type: object - storage.s3HuaweiOBSConfig: + storage.s3DreamhostConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -6299,8 +6901,8 @@ definitions: description: The encoding for the backend. 
type: string endpoint: - description: Endpoint for OBS API. - example: obs.af-south-1.myhuaweicloud.com + description: Endpoint for S3 API. + example: objects-us-east-1.dream.io type: string envAuth: default: false @@ -6324,6 +6926,9 @@ definitions: default: 0 description: 'Version of ListObjects to use: 1,2 or 0 for auto.' type: integer + locationConstraint: + description: Location constraint - must be set to match the Region. + type: string maxUploadParts: default: 10000 description: Maximum number of parts in a multipart upload. @@ -6362,9 +6967,19 @@ definitions: description: Profile to use in the shared credentials file. type: string region: - description: Region to connect to. - the location where your bucket will be - created and your data stored. Need bo be same with your endpoint. - example: af-south-1 + description: Region to connect to. + type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. type: string sdkLogMode: default: "Off" @@ -6379,6 +6994,10 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string uploadConcurrency: default: 4 description: Concurrency for multipart uploads and copies. @@ -6396,6 +7015,14 @@ definitions: description: Set if rclone should report BucketAlreadyExists errors on bucket creation. type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. 
+ type: string useDualStack: default: false description: If true use AWS S3 dual-stack endpoint (IPv6 support). @@ -6417,6 +7044,10 @@ definitions: default: unset description: Whether to use an unsigned payload in PutObject type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string v2Auth: default: false description: If true use v2 authentication. @@ -6434,7 +7065,7 @@ definitions: description: Include old versions in directory listings. type: boolean type: object - storage.s3IBMCOSConfig: + storage.s3ExabaConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -6442,7 +7073,6 @@ definitions: acl: description: Canned ACL used when creating buckets and storing or copying objects. - example: private type: string bucketAcl: description: Canned ACL used when creating buckets. @@ -6484,8 +7114,7 @@ definitions: description: The encoding for the backend. type: string endpoint: - description: Endpoint for IBM COS S3 API. - example: s3.us.cloud-object-storage.appdomain.cloud + description: Endpoint for S3 API. type: string envAuth: default: false @@ -6510,9 +7139,7 @@ definitions: description: 'Version of ListObjects to use: 1,2 or 0 for auto.' type: integer locationConstraint: - description: Location constraint - must match endpoint when using IBM Cloud - Public. - example: us-standard + description: Location constraint - must be set to match the Region. type: string maxUploadParts: default: 10000 @@ -6553,7 +7180,18 @@ definitions: type: string region: description: Region to connect to. - example: "" + type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. 
type: string sdkLogMode: default: "Off" @@ -6568,6 +7206,10 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string uploadConcurrency: default: 4 description: Concurrency for multipart uploads and copies. @@ -6585,6 +7227,14 @@ definitions: description: Set if rclone should report BucketAlreadyExists errors on bucket creation. type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string useDualStack: default: false description: If true use AWS S3 dual-stack endpoint (IPv6 support). @@ -6606,6 +7256,10 @@ definitions: default: unset description: Whether to use an unsigned payload in PutObject type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string v2Auth: default: false description: If true use v2 authentication. @@ -6623,7 +7277,7 @@ definitions: description: Include old versions in directory listings. type: boolean type: object - storage.s3IDriveConfig: + storage.s3FileLuConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -6671,6 +7325,10 @@ definitions: default: Slash,InvalidUtf8,Dot description: The encoding for the backend. type: string + endpoint: + description: Endpoint for S3 API. + example: s5lu.com + type: string envAuth: default: false description: Get AWS credentials from runtime (environment variables or EC2/ECS @@ -6730,6 +7388,22 @@ definitions: profile: description: Profile to use in the shared credentials file. type: string + region: + description: Region to connect to. + example: global + type: string + roleArn: + description: ARN of the IAM role to assume. 
+ type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. + type: string sdkLogMode: default: "Off" description: Set to debug the SDK @@ -6743,6 +7417,10 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string uploadConcurrency: default: 4 description: Concurrency for multipart uploads and copies. @@ -6760,6 +7438,14 @@ definitions: description: Set if rclone should report BucketAlreadyExists errors on bucket creation. type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string useDualStack: default: false description: If true use AWS S3 dual-stack endpoint (IPv6 support). @@ -6781,6 +7467,10 @@ definitions: default: unset description: Whether to use an unsigned payload in PutObject type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string v2Auth: default: false description: If true use v2 authentication. @@ -6798,19 +7488,11 @@ definitions: description: Include old versions in directory listings. type: boolean type: object - storage.s3IONOSConfig: + storage.s3FlashBladeConfig: properties: accessKeyId: description: AWS Access Key ID. type: string - acl: - description: Canned ACL used when creating buckets and storing or copying - objects. - type: string - bucketAcl: - description: Canned ACL used when creating buckets. - example: private - type: string chunkSize: default: 5Mi description: Chunk size to use for uploading. 
@@ -6847,8 +7529,7 @@ definitions: description: The encoding for the backend. type: string endpoint: - description: Endpoint for IONOS S3 Object Storage. - example: s3-eu-central-1.ionoscloud.com + description: Endpoint for S3 API. type: string envAuth: default: false @@ -6909,11 +7590,19 @@ definitions: profile: description: Profile to use in the shared credentials file. type: string - region: - description: Region where your bucket will be created and your data stored. - example: de + roleArn: + description: ARN of the IAM role to assume. type: string - sdkLogMode: + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. + type: string + sdkLogMode: default: "Off" description: Set to debug the SDK type: string @@ -6926,6 +7615,10 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string uploadConcurrency: default: 4 description: Concurrency for multipart uploads and copies. @@ -6943,6 +7636,14 @@ definitions: description: Set if rclone should report BucketAlreadyExists errors on bucket creation. type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string useDualStack: default: false description: If true use AWS S3 dual-stack endpoint (IPv6 support). @@ -6964,6 +7665,10 @@ definitions: default: unset description: Whether to use an unsigned payload in PutObject type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. 
+ type: string v2Auth: default: false description: If true use v2 authentication. @@ -6981,7 +7686,7 @@ definitions: description: Include old versions in directory listings. type: boolean type: object - storage.s3LeviiaConfig: + storage.s3GCSConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -7031,6 +7736,7 @@ definitions: type: string endpoint: description: Endpoint for S3 API. + example: https://storage.googleapis.com type: string envAuth: default: false @@ -7054,6 +7760,9 @@ definitions: default: 0 description: 'Version of ListObjects to use: 1,2 or 0 for auto.' type: integer + locationConstraint: + description: Location constraint - must be set to match the Region. + type: string maxUploadParts: default: 10000 description: Maximum number of parts in a multipart upload. @@ -7093,7 +7802,18 @@ definitions: type: string region: description: Region to connect to. - example: "" + type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. type: string sdkLogMode: default: "Off" @@ -7108,6 +7828,10 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string uploadConcurrency: default: 4 description: Concurrency for multipart uploads and copies. @@ -7125,6 +7849,14 @@ definitions: description: Set if rclone should report BucketAlreadyExists errors on bucket creation. type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. 
+ type: string useDualStack: default: false description: If true use AWS S3 dual-stack endpoint (IPv6 support). @@ -7146,6 +7878,10 @@ definitions: default: unset description: Whether to use an unsigned payload in PutObject type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string v2Auth: default: false description: If true use v2 authentication. @@ -7163,7 +7899,7 @@ definitions: description: Include old versions in directory listings. type: boolean type: object - storage.s3LiaraConfig: + storage.s3HetznerConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -7212,8 +7948,8 @@ definitions: description: The encoding for the backend. type: string endpoint: - description: Endpoint for Liara Object Storage API. - example: storage.iran.liara.space + description: Endpoint for S3 API. + example: hel1.your-objectstorage.com type: string envAuth: default: false @@ -7237,6 +7973,9 @@ definitions: default: 0 description: 'Version of ListObjects to use: 1,2 or 0 for auto.' type: integer + locationConstraint: + description: Location constraint - must be set to match the Region. + type: string maxUploadParts: default: 10000 description: Maximum number of parts in a multipart upload. @@ -7274,6 +8013,22 @@ definitions: profile: description: Profile to use in the shared credentials file. type: string + region: + description: Region to connect to. + example: hel1 + type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. + type: string sdkLogMode: default: "Off" description: Set to debug the SDK @@ -7287,9 +8042,9 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. 
type: string - storageClass: - description: The storage class to use when storing new objects in Liara - example: STANDARD + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. type: string uploadConcurrency: default: 4 @@ -7308,6 +8063,14 @@ definitions: description: Set if rclone should report BucketAlreadyExists errors on bucket creation. type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string useDualStack: default: false description: If true use AWS S3 dual-stack endpoint (IPv6 support). @@ -7329,6 +8092,10 @@ definitions: default: unset description: Whether to use an unsigned payload in PutObject type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string v2Auth: default: false description: If true use v2 authentication. @@ -7346,7 +8113,7 @@ definitions: description: Include old versions in directory listings. type: boolean type: object - storage.s3LinodeConfig: + storage.s3HuaweiOBSConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -7395,8 +8162,8 @@ definitions: description: The encoding for the backend. type: string endpoint: - description: Endpoint for Linode Object Storage API. - example: us-southeast-1.linodeobjects.com + description: Endpoint for S3 API. + example: obs.af-south-1.myhuaweicloud.com type: string envAuth: default: false @@ -7457,6 +8224,22 @@ definitions: profile: description: Profile to use in the shared credentials file. type: string + region: + description: Region to connect to. + example: af-south-1 + type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. 
+ type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. + type: string sdkLogMode: default: "Off" description: Set to debug the SDK @@ -7470,6 +8253,10 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string uploadConcurrency: default: 4 description: Concurrency for multipart uploads and copies. @@ -7487,6 +8274,14 @@ definitions: description: Set if rclone should report BucketAlreadyExists errors on bucket creation. type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string useDualStack: default: false description: If true use AWS S3 dual-stack endpoint (IPv6 support). @@ -7508,6 +8303,10 @@ definitions: default: unset description: Whether to use an unsigned payload in PutObject type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string v2Auth: default: false description: If true use v2 authentication. @@ -7525,7 +8324,7 @@ definitions: description: Include old versions in directory listings. type: boolean type: object - storage.s3LyveCloudConfig: + storage.s3IBMCOSConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -7533,6 +8332,7 @@ definitions: acl: description: Canned ACL used when creating buckets and storing or copying objects. + example: private type: string bucketAcl: description: Canned ACL used when creating buckets. @@ -7575,7 +8375,7 @@ definitions: type: string endpoint: description: Endpoint for S3 API. 
- example: s3.us-east-1.lyvecloud.seagate.com + example: s3.us.cloud-object-storage.appdomain.cloud type: string envAuth: default: false @@ -7587,6 +8387,12 @@ definitions: default: true description: If true use path style access if false use virtual hosted style. type: boolean + ibmApiKey: + description: IBM API Key to be used to obtain IAM token + type: string + ibmResourceInstanceId: + description: IBM service instance id + type: string listChunk: default: 1000 description: Size of listing chunk (response list for each ListObject S3 request). @@ -7601,6 +8407,7 @@ definitions: type: integer locationConstraint: description: Location constraint - must be set to match the Region. + example: us-standard type: string maxUploadParts: default: 10000 @@ -7641,7 +8448,18 @@ definitions: type: string region: description: Region to connect to. - example: "" + type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. type: string sdkLogMode: default: "Off" @@ -7656,6 +8474,10 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string uploadConcurrency: default: 4 description: Concurrency for multipart uploads and copies. @@ -7673,6 +8495,14 @@ definitions: description: Set if rclone should report BucketAlreadyExists errors on bucket creation. type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. 
+ type: string useDualStack: default: false description: If true use AWS S3 dual-stack endpoint (IPv6 support). @@ -7694,6 +8524,10 @@ definitions: default: unset description: Whether to use an unsigned payload in PutObject type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string v2Auth: default: false description: If true use v2 authentication. @@ -7711,7 +8545,7 @@ definitions: description: Include old versions in directory listings. type: boolean type: object - storage.s3MagaluConfig: + storage.s3IDriveConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -7759,10 +8593,6 @@ definitions: default: Slash,InvalidUtf8,Dot description: The encoding for the backend. type: string - endpoint: - description: Endpoint for S3 API. - example: br-se1.magaluobjects.com - type: string envAuth: default: false description: Get AWS credentials from runtime (environment variables or EC2/ECS @@ -7822,6 +8652,18 @@ definitions: profile: description: Profile to use in the shared credentials file. type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. + type: string sdkLogMode: default: "Off" description: Set to debug the SDK @@ -7835,9 +8677,9 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. type: string - storageClass: - description: The storage class to use when storing new objects in Magalu. - example: STANDARD + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. type: string uploadConcurrency: default: 4 @@ -7856,6 +8698,14 @@ definitions: description: Set if rclone should report BucketAlreadyExists errors on bucket creation. 
type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string useDualStack: default: false description: If true use AWS S3 dual-stack endpoint (IPv6 support). @@ -7877,6 +8727,10 @@ definitions: default: unset description: Whether to use an unsigned payload in PutObject type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string v2Auth: default: false description: If true use v2 authentication. @@ -7894,7 +8748,7 @@ definitions: description: Include old versions in directory listings. type: boolean type: object - storage.s3MinioConfig: + storage.s3IONOSConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -7944,6 +8798,7 @@ definitions: type: string endpoint: description: Endpoint for S3 API. + example: s3-eu-central-1.ionoscloud.com type: string envAuth: default: false @@ -7967,9 +8822,6 @@ definitions: default: 0 description: 'Version of ListObjects to use: 1,2 or 0 for auto.' type: integer - locationConstraint: - description: Location constraint - must be set to match the Region. - type: string maxUploadParts: default: 10000 description: Maximum number of parts in a multipart upload. @@ -8009,7 +8861,19 @@ definitions: type: string region: description: Region to connect to. - example: "" + example: eu-central-2 + type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. type: string sdkLogMode: default: "Off" @@ -8018,40 +8882,15 @@ definitions: secretAccessKey: description: AWS Secret Access Key (password). 
type: string - serverSideEncryption: - description: The server-side encryption algorithm used when storing this object - in S3. - example: "" - type: string sessionToken: description: An AWS session token. type: string sharedCredentialsFile: description: Path to the shared credentials file. type: string - sseCustomerAlgorithm: - description: If using SSE-C, the server-side encryption algorithm used when - storing this object in S3. - example: "" - type: string - sseCustomerKey: - description: To use SSE-C you may provide the secret encryption key used to - encrypt/decrypt your data. - example: "" - type: string - sseCustomerKeyBase64: - description: If using SSE-C you must provide the secret encryption key encoded - in base64 format to encrypt/decrypt your data. - example: "" - type: string - sseCustomerKeyMd5: - description: If using SSE-C you may provide the secret encryption key MD5 - checksum (optional). - example: "" - type: string - sseKmsKeyId: - description: If using KMS ID you must provide the ARN of Key. - example: "" + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. type: string uploadConcurrency: default: 4 @@ -8070,6 +8909,14 @@ definitions: description: Set if rclone should report BucketAlreadyExists errors on bucket creation. type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string useDualStack: default: false description: If true use AWS S3 dual-stack endpoint (IPv6 support). @@ -8091,6 +8938,10 @@ definitions: default: unset description: Whether to use an unsigned payload in PutObject type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string v2Auth: default: false description: If true use v2 authentication. 
@@ -8108,7 +8959,7 @@ definitions: description: Include old versions in directory listings. type: boolean type: object - storage.s3NeteaseConfig: + storage.s3IntercoloConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -8158,6 +9009,7 @@ definitions: type: string endpoint: description: Endpoint for S3 API. + example: de-fra.i3storage.com type: string envAuth: default: false @@ -8181,9 +9033,6 @@ definitions: default: 0 description: 'Version of ListObjects to use: 1,2 or 0 for auto.' type: integer - locationConstraint: - description: Location constraint - must be set to match the Region. - type: string maxUploadParts: default: 10000 description: Maximum number of parts in a multipart upload. @@ -8223,7 +9072,19 @@ definitions: type: string region: description: Region to connect to. - example: "" + example: de-fra + type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. type: string sdkLogMode: default: "Off" @@ -8238,6 +9099,10 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string uploadConcurrency: default: 4 description: Concurrency for multipart uploads and copies. @@ -8255,6 +9120,14 @@ definitions: description: Set if rclone should report BucketAlreadyExists errors on bucket creation. type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. 
+ type: string useDualStack: default: false description: If true use AWS S3 dual-stack endpoint (IPv6 support). @@ -8276,6 +9149,10 @@ definitions: default: unset description: Whether to use an unsigned payload in PutObject type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string v2Auth: default: false description: If true use v2 authentication. @@ -8293,7 +9170,7 @@ definitions: description: Include old versions in directory listings. type: boolean type: object - storage.s3OtherConfig: + storage.s3LeviiaConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -8343,6 +9220,7 @@ definitions: type: string endpoint: description: Endpoint for S3 API. + example: s3.leviia.com type: string envAuth: default: false @@ -8366,9 +9244,6 @@ definitions: default: 0 description: 'Version of ListObjects to use: 1,2 or 0 for auto.' type: integer - locationConstraint: - description: Location constraint - must be set to match the Region. - type: string maxUploadParts: default: 10000 description: Maximum number of parts in a multipart upload. @@ -8408,7 +9283,18 @@ definitions: type: string region: description: Region to connect to. - example: "" + type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. type: string sdkLogMode: default: "Off" @@ -8423,6 +9309,10 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string uploadConcurrency: default: 4 description: Concurrency for multipart uploads and copies. 
@@ -8440,6 +9330,14 @@ definitions: description: Set if rclone should report BucketAlreadyExists errors on bucket creation. type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string useDualStack: default: false description: If true use AWS S3 dual-stack endpoint (IPv6 support). @@ -8461,6 +9359,10 @@ definitions: default: unset description: Whether to use an unsigned payload in PutObject type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string v2Auth: default: false description: If true use v2 authentication. @@ -8478,7 +9380,7 @@ definitions: description: Include old versions in directory listings. type: boolean type: object - storage.s3PetaboxConfig: + storage.s3LiaraConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -8527,8 +9429,8 @@ definitions: description: The encoding for the backend. type: string endpoint: - description: Endpoint for Petabox S3 Object Storage. - example: s3.petabox.io + description: Endpoint for S3 API. + example: storage.iran.liara.space type: string envAuth: default: false @@ -8589,9 +9491,17 @@ definitions: profile: description: Profile to use in the shared credentials file. type: string - region: - description: Region where your bucket will be created and your data stored. - example: us-east-1 + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. type: string sdkLogMode: default: "Off" @@ -8606,6 +9516,13 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. 
type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string + storageClass: + description: The storage class to use when storing new objects in S3. + type: string uploadConcurrency: default: 4 description: Concurrency for multipart uploads and copies. @@ -8623,6 +9540,14 @@ definitions: description: Set if rclone should report BucketAlreadyExists errors on bucket creation. type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string useDualStack: default: false description: If true use AWS S3 dual-stack endpoint (IPv6 support). @@ -8644,6 +9569,10 @@ definitions: default: unset description: Whether to use an unsigned payload in PutObject type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string v2Auth: default: false description: If true use v2 authentication. @@ -8661,7 +9590,7 @@ definitions: description: Include old versions in directory listings. type: boolean type: object - storage.s3QiniuConfig: + storage.s3LinodeConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -8710,8 +9639,8 @@ definitions: description: The encoding for the backend. type: string endpoint: - description: Endpoint for Qiniu Object Storage. - example: s3-cn-east-1.qiniucs.com + description: Endpoint for S3 API. + example: nl-ams-1.linodeobjects.com type: string envAuth: default: false @@ -8735,10 +9664,6 @@ definitions: default: 0 description: 'Version of ListObjects to use: 1,2 or 0 for auto.' type: integer - locationConstraint: - description: Location constraint - must be set to match the Region. - example: cn-east-1 - type: string maxUploadParts: default: 10000 description: Maximum number of parts in a multipart upload. 
@@ -8776,9 +9701,17 @@ definitions: profile: description: Profile to use in the shared credentials file. type: string - region: - description: Region to connect to. - example: cn-east-1 + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. type: string sdkLogMode: default: "Off" @@ -8793,9 +9726,9 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. type: string - storageClass: - description: The storage class to use when storing new objects in Qiniu. - example: STANDARD + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. type: string uploadConcurrency: default: 4 @@ -8814,6 +9747,14 @@ definitions: description: Set if rclone should report BucketAlreadyExists errors on bucket creation. type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string useDualStack: default: false description: If true use AWS S3 dual-stack endpoint (IPv6 support). @@ -8835,6 +9776,10 @@ definitions: default: unset description: Whether to use an unsigned payload in PutObject type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string v2Auth: default: false description: If true use v2 authentication. @@ -8852,7 +9797,7 @@ definitions: description: Include old versions in directory listings. type: boolean type: object - storage.s3RackCorpConfig: + storage.s3LyveCloudConfig: properties: accessKeyId: description: AWS Access Key ID. 
@@ -8901,8 +9846,8 @@ definitions: description: The encoding for the backend. type: string endpoint: - description: Endpoint for RackCorp Object Storage. - example: s3.rackcorp.com + description: Endpoint for S3 API. + example: s3.us-west-1.{account_name}.lyve.seagate.com type: string envAuth: default: false @@ -8927,9 +9872,7 @@ definitions: description: 'Version of ListObjects to use: 1,2 or 0 for auto.' type: integer locationConstraint: - description: Location constraint - the location where your bucket will be - located and your data stored. - example: global + description: Location constraint - must be set to match the Region. type: string maxUploadParts: default: 10000 @@ -8969,9 +9912,19 @@ definitions: description: Profile to use in the shared credentials file. type: string region: - description: region - the location where your bucket will be created and your - data stored. - example: global + description: Region to connect to. + type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. type: string sdkLogMode: default: "Off" @@ -8986,6 +9939,10 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string uploadConcurrency: default: 4 description: Concurrency for multipart uploads and copies. @@ -9003,6 +9960,14 @@ definitions: description: Set if rclone should report BucketAlreadyExists errors on bucket creation. type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. 
+ type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string useDualStack: default: false description: If true use AWS S3 dual-stack endpoint (IPv6 support). @@ -9024,6 +9989,10 @@ definitions: default: unset description: Whether to use an unsigned payload in PutObject type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string v2Auth: default: false description: If true use v2 authentication. @@ -9041,7 +10010,7 @@ definitions: description: Include old versions in directory listings. type: boolean type: object - storage.s3RcloneConfig: + storage.s3MagaluConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -9091,6 +10060,7 @@ definitions: type: string endpoint: description: Endpoint for S3 API. + example: br-se1.magaluobjects.com type: string envAuth: default: false @@ -9114,9 +10084,6 @@ definitions: default: 0 description: 'Version of ListObjects to use: 1,2 or 0 for auto.' type: integer - locationConstraint: - description: Location constraint - must be set to match the Region. - type: string maxUploadParts: default: 10000 description: Maximum number of parts in a multipart upload. @@ -9154,9 +10121,17 @@ definitions: profile: description: Profile to use in the shared credentials file. type: string - region: - description: Region to connect to. - example: "" + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. type: string sdkLogMode: default: "Off" @@ -9171,6 +10146,13 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. 
type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string + storageClass: + description: The storage class to use when storing new objects in S3. + type: string uploadConcurrency: default: 4 description: Concurrency for multipart uploads and copies. @@ -9188,6 +10170,14 @@ definitions: description: Set if rclone should report BucketAlreadyExists errors on bucket creation. type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string useDualStack: default: false description: If true use AWS S3 dual-stack endpoint (IPv6 support). @@ -9209,6 +10199,10 @@ definitions: default: unset description: Whether to use an unsigned payload in PutObject type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string v2Auth: default: false description: If true use v2 authentication. @@ -9226,15 +10220,11 @@ definitions: description: Include old versions in directory listings. type: boolean type: object - storage.s3ScalewayConfig: + storage.s3MegaConfig: properties: accessKeyId: description: AWS Access Key ID. type: string - acl: - description: Canned ACL used when creating buckets and storing or copying - objects. - type: string bucketAcl: description: Canned ACL used when creating buckets. example: private @@ -9275,8 +10265,8 @@ definitions: description: The encoding for the backend. type: string endpoint: - description: Endpoint for Scaleway Object Storage. - example: s3.nl-ams.scw.cloud + description: Endpoint for S3 API. + example: s3.eu-central-1.s4.mega.io type: string envAuth: default: false @@ -9337,9 +10327,17 @@ definitions: profile: description: Profile to use in the shared credentials file. 
type: string - region: - description: Region to connect to. - example: nl-ams + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. type: string sdkLogMode: default: "Off" @@ -9354,9 +10352,9 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. type: string - storageClass: - description: The storage class to use when storing new objects in S3. - example: "" + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. type: string uploadConcurrency: default: 4 @@ -9375,6 +10373,14 @@ definitions: description: Set if rclone should report BucketAlreadyExists errors on bucket creation. type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string useDualStack: default: false description: If true use AWS S3 dual-stack endpoint (IPv6 support). @@ -9396,6 +10402,10 @@ definitions: default: unset description: Whether to use an unsigned payload in PutObject type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string v2Auth: default: false description: If true use v2 authentication. @@ -9413,7 +10423,7 @@ definitions: description: Include old versions in directory listings. type: boolean type: object - storage.s3SeaweedFSConfig: + storage.s3MinioConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -9463,7 +10473,6 @@ definitions: type: string endpoint: description: Endpoint for S3 API. 
- example: localhost:8333 type: string envAuth: default: false @@ -9529,7 +10538,18 @@ definitions: type: string region: description: Region to connect to. - example: "" + type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. type: string sdkLogMode: default: "Off" @@ -9538,12 +10558,44 @@ definitions: secretAccessKey: description: AWS Secret Access Key (password). type: string + serverSideEncryption: + description: The server-side encryption algorithm used when storing this object + in S3. + type: string sessionToken: description: An AWS session token. type: string sharedCredentialsFile: description: Path to the shared credentials file. type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string + sseCustomerAlgorithm: + description: If using SSE-C, the server-side encryption algorithm used when + storing this object in S3. + example: "" + type: string + sseCustomerKey: + description: To use SSE-C you may provide the secret encryption key used to + encrypt/decrypt your data. + example: "" + type: string + sseCustomerKeyBase64: + description: If using SSE-C you must provide the secret encryption key encoded + in base64 format to encrypt/decrypt your data. + example: "" + type: string + sseCustomerKeyMd5: + description: If using SSE-C you may provide the secret encryption key MD5 + checksum (optional). + example: "" + type: string + sseKmsKeyId: + description: If using KMS ID you must provide the ARN of Key. + example: "" + type: string uploadConcurrency: default: 4 description: Concurrency for multipart uploads and copies. 
@@ -9561,6 +10613,14 @@ definitions: description: Set if rclone should report BucketAlreadyExists errors on bucket creation. type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string useDualStack: default: false description: If true use AWS S3 dual-stack endpoint (IPv6 support). @@ -9582,6 +10642,10 @@ definitions: default: unset description: Whether to use an unsigned payload in PutObject type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string v2Auth: default: false description: If true use v2 authentication. @@ -9599,7 +10663,7 @@ definitions: description: Include old versions in directory listings. type: boolean type: object - storage.s3StackPathConfig: + storage.s3NeteaseConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -9648,8 +10712,7 @@ definitions: description: The encoding for the backend. type: string endpoint: - description: Endpoint for StackPath Object Storage. - example: s3.us-east-2.stackpathstorage.com + description: Endpoint for S3 API. type: string envAuth: default: false @@ -9673,6 +10736,9 @@ definitions: default: 0 description: 'Version of ListObjects to use: 1,2 or 0 for auto.' type: integer + locationConstraint: + description: Location constraint - must be set to match the Region. + type: string maxUploadParts: default: 10000 description: Maximum number of parts in a multipart upload. @@ -9712,7 +10778,18 @@ definitions: type: string region: description: Region to connect to. - example: "" + type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. 
+ type: string + roleSessionName: + description: Session name for assumed role. type: string sdkLogMode: default: "Off" @@ -9727,6 +10804,10 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string uploadConcurrency: default: 4 description: Concurrency for multipart uploads and copies. @@ -9744,6 +10825,14 @@ definitions: description: Set if rclone should report BucketAlreadyExists errors on bucket creation. type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string useDualStack: default: false description: If true use AWS S3 dual-stack endpoint (IPv6 support). @@ -9765,6 +10854,10 @@ definitions: default: unset description: Whether to use an unsigned payload in PutObject type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string v2Auth: default: false description: If true use v2 authentication. @@ -9782,11 +10875,15 @@ definitions: description: Include old versions in directory listings. type: boolean type: object - storage.s3StorjConfig: + storage.s3OVHcloudConfig: properties: accessKeyId: description: AWS Access Key ID. type: string + acl: + description: Canned ACL used when creating buckets and storing or copying + objects. + type: string bucketAcl: description: Canned ACL used when creating buckets. example: private @@ -9827,8 +10924,8 @@ definitions: description: The encoding for the backend. type: string endpoint: - description: Endpoint for Storj Gateway. - example: gateway.storjshare.io + description: Endpoint for S3 API. 
+ example: s3.gra.io.cloud.ovh.net type: string envAuth: default: false @@ -9889,6 +10986,22 @@ definitions: profile: description: Profile to use in the shared credentials file. type: string + region: + description: Region to connect to. + example: gra + type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. + type: string sdkLogMode: default: "Off" description: Set to debug the SDK @@ -9902,6 +11015,10 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string uploadConcurrency: default: 4 description: Concurrency for multipart uploads and copies. @@ -9919,6 +11036,14 @@ definitions: description: Set if rclone should report BucketAlreadyExists errors on bucket creation. type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string useDualStack: default: false description: If true use AWS S3 dual-stack endpoint (IPv6 support). @@ -9940,6 +11065,10 @@ definitions: default: unset description: Whether to use an unsigned payload in PutObject type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string v2Auth: default: false description: If true use v2 authentication. @@ -9957,11 +11086,15 @@ definitions: description: Include old versions in directory listings. 
type: boolean type: object - storage.s3SynologyConfig: + storage.s3OtherConfig: properties: accessKeyId: description: AWS Access Key ID. type: string + acl: + description: Canned ACL used when creating buckets and storing or copying + objects. + type: string bucketAcl: description: Canned ACL used when creating buckets. example: private @@ -10002,8 +11135,7 @@ definitions: description: The encoding for the backend. type: string endpoint: - description: Endpoint for Synology C2 Object Storage API. - example: eu-001.s3.synologyc2.net + description: Endpoint for S3 API. type: string envAuth: default: false @@ -10068,8 +11200,19 @@ definitions: description: Profile to use in the shared credentials file. type: string region: - description: Region where your data stored. - example: eu-001 + description: Region to connect to. + type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. type: string sdkLogMode: default: "Off" @@ -10084,6 +11227,10 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string uploadConcurrency: default: 4 description: Concurrency for multipart uploads and copies. @@ -10101,6 +11248,14 @@ definitions: description: Set if rclone should report BucketAlreadyExists errors on bucket creation. type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. 
+ type: string useDualStack: default: false description: If true use AWS S3 dual-stack endpoint (IPv6 support). @@ -10122,6 +11277,10 @@ definitions: default: unset description: Whether to use an unsigned payload in PutObject type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string v2Auth: default: false description: If true use v2 authentication. @@ -10139,7 +11298,7 @@ definitions: description: Include old versions in directory listings. type: boolean type: object - storage.s3TencentCOSConfig: + storage.s3OutscaleConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -10147,7 +11306,6 @@ definitions: acl: description: Canned ACL used when creating buckets and storing or copying objects. - example: default type: string bucketAcl: description: Canned ACL used when creating buckets. @@ -10189,8 +11347,8 @@ definitions: description: The encoding for the backend. type: string endpoint: - description: Endpoint for Tencent COS API. - example: cos.ap-beijing.myqcloud.com + description: Endpoint for S3 API. + example: oos.eu-west-2.outscale.com type: string envAuth: default: false @@ -10251,6 +11409,22 @@ definitions: profile: description: Profile to use in the shared credentials file. type: string + region: + description: Region to connect to. + example: eu-west-2 + type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. + type: string sdkLogMode: default: "Off" description: Set to debug the SDK @@ -10264,10 +11438,9 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. type: string - storageClass: - description: The storage class to use when storing new objects in Tencent - COS. 
- example: "" + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. type: string uploadConcurrency: default: 4 @@ -10286,6 +11459,14 @@ definitions: description: Set if rclone should report BucketAlreadyExists errors on bucket creation. type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string useDualStack: default: false description: If true use AWS S3 dual-stack endpoint (IPv6 support). @@ -10307,6 +11488,10 @@ definitions: default: unset description: Whether to use an unsigned payload in PutObject type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string v2Auth: default: false description: If true use v2 authentication. @@ -10324,7 +11509,7 @@ definitions: description: Include old versions in directory listings. type: boolean type: object - storage.s3WasabiConfig: + storage.s3PetaboxConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -10374,7 +11559,7 @@ definitions: type: string endpoint: description: Endpoint for S3 API. - example: s3.wasabisys.com + example: s3.petabox.io type: string envAuth: default: false @@ -10398,9 +11583,6 @@ definitions: default: 0 description: 'Version of ListObjects to use: 1,2 or 0 for auto.' type: integer - locationConstraint: - description: Location constraint - must be set to match the Region. - type: string maxUploadParts: default: 10000 description: Maximum number of parts in a multipart upload. @@ -10440,7 +11622,19 @@ definitions: type: string region: description: Region to connect to. - example: "" + example: eu-central-1 + type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. 
+ type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. type: string sdkLogMode: default: "Off" @@ -10455,6 +11649,10 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string uploadConcurrency: default: 4 description: Concurrency for multipart uploads and copies. @@ -10472,6 +11670,14 @@ definitions: description: Set if rclone should report BucketAlreadyExists errors on bucket creation. type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string useDualStack: default: false description: If true use AWS S3 dual-stack endpoint (IPv6 support). @@ -10493,6 +11699,10 @@ definitions: default: unset description: Whether to use an unsigned payload in PutObject type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string v2Auth: default: false description: If true use v2 authentication. @@ -10510,666 +11720,4255 @@ definitions: description: Include old versions in directory listings. type: boolean type: object - storage.seafileConfig: + storage.s3QiniuConfig: properties: - 2fa: - default: false - description: Two-factor authentication ('true' if the account has 2FA enabled). - type: boolean - authToken: - description: Authentication token. + accessKeyId: + description: AWS Access Key ID. type: string - createLibrary: + acl: + description: Canned ACL used when creating buckets and storing or copying + objects. + type: string + bucketAcl: + description: Canned ACL used when creating buckets. 
+ example: private + type: string + chunkSize: + default: 5Mi + description: Chunk size to use for uploading. + type: string + copyCutoff: + default: 4.656Gi + description: Cutoff for switching to multipart copy. + type: string + decompress: default: false - description: Should rclone create a library if it doesn't exist. + description: If set this will decompress gzip encoded objects. type: boolean description: description: Description of the remote. type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean + disableChecksum: + default: false + description: Don't store MD5 checksum with object metadata. + type: boolean + disableHttp2: + default: false + description: Disable usage of http2 for S3 backends. + type: boolean + downloadUrl: + description: Custom endpoint for downloads. + type: string encoding: - default: Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8 + default: Slash,InvalidUtf8,Dot description: The encoding for the backend. type: string - library: - description: Name of the library. - type: string - libraryKey: - description: Library password (for encrypted libraries only). - type: string - pass: - description: Password. - type: string - url: - description: URL of seafile host to connect to. - example: https://cloud.seafile.com/ + endpoint: + description: Endpoint for S3 API. + example: s3-cn-east-1.qiniucs.com type: string - user: - description: User name (usually email address). - type: string - type: object - storage.sftpConfig: - properties: - askPassword: + envAuth: default: false - description: Allow asking for SFTP password when needed. + description: Get AWS credentials from runtime (environment variables or EC2/ECS + meta data if no env vars). + example: false type: boolean - chunkSize: - default: 32Ki - description: Upload and download chunk size. 
- type: string - ciphers: - description: Space separated list of ciphers to be used for session encryption, - ordered by preference. - type: string - concurrency: - default: 64 - description: The maximum number of outstanding requests for one file + forcePathStyle: + default: true + description: If true use path style access if false use virtual hosted style. + type: boolean + listChunk: + default: 1000 + description: Size of listing chunk (response list for each ListObject S3 request). type: integer - connections: + listUrlEncode: + default: unset + description: 'Whether to url encode listings: true/false/unset' + type: string + listVersion: default: 0 - description: Maximum number of SFTP simultaneous connections, 0 for unlimited. + description: 'Version of ListObjects to use: 1,2 or 0 for auto.' type: integer - copyIsHardlink: + locationConstraint: + description: Location constraint - must be set to match the Region. + example: cn-east-1 + type: string + maxUploadParts: + default: 10000 + description: Maximum number of parts in a multipart upload. + type: integer + memoryPoolFlushTime: + default: 1m0s + description: How often internal memory buffer pools will be flushed. (no longer + used) + type: string + memoryPoolUseMmap: default: false - description: Set to enable server side copies using hardlinks. + description: Whether to use mmap buffers in internal memory pool. (no longer + used) type: boolean - description: - description: Description of the remote. + mightGzip: + default: unset + description: Set this if the backend might gzip objects. type: string - disableConcurrentReads: + noCheckBucket: default: false - description: If set don't use concurrent reads. + description: If set, don't attempt to check the bucket exists or create it. type: boolean - disableConcurrentWrites: + noHead: default: false - description: If set don't use concurrent writes. + description: If set, don't HEAD uploaded objects to check integrity. 
type: boolean - disableHashcheck: + noHeadObject: default: false - description: Disable the execution of SSH commands to determine if remote - file hashing is available. + description: If set, do not do HEAD before GET when getting objects. type: boolean - host: - description: SSH host to connect to. + noSystemMetadata: + default: false + description: Suppress setting and reading of system metadata + type: boolean + profile: + description: Profile to use in the shared credentials file. type: string - hostKeyAlgorithms: - description: Space separated list of host key algorithms, ordered by preference. + region: + description: Region to connect to. + example: cn-east-1 type: string - idleTimeout: - default: 1m0s - description: Max time before closing idle connections. + roleArn: + description: ARN of the IAM role to assume. type: string - keyExchange: - description: Space separated list of key exchange algorithms, ordered by preference. + roleExternalId: + description: External ID for assumed role. type: string - keyFile: - description: Path to PEM-encoded private key file. + roleSessionDuration: + description: Session duration for assumed role. type: string - keyFilePass: - description: The passphrase to decrypt the PEM-encoded private key file. + roleSessionName: + description: Session name for assumed role. type: string - keyPem: - description: Raw PEM-encoded private key. + sdkLogMode: + default: "Off" + description: Set to debug the SDK type: string - keyUseAgent: - default: false - description: When set forces the usage of the ssh-agent. - type: boolean - knownHostsFile: - description: Optional path to known_hosts file. - example: ~/.ssh/known_hosts + secretAccessKey: + description: AWS Secret Access Key (password). type: string - macs: - description: Space separated list of MACs (message authentication code) algorithms, - ordered by preference. + sessionToken: + description: An AWS session token. 
type: string - md5sumCommand: - description: The command used to read md5 hashes. + sharedCredentialsFile: + description: Path to the shared credentials file. type: string - pass: - description: SSH password, leave blank to use ssh-agent. + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. type: string - pathOverride: - description: Override path used by SSH shell commands. + storageClass: + description: The storage class to use when storing new objects in S3. + example: LINE type: string - port: - default: 22 - description: SSH port number. + uploadConcurrency: + default: 4 + description: Concurrency for multipart uploads and copies. type: integer - pubkeyFile: - description: Optional path to public key file. + uploadCutoff: + default: 200Mi + description: Cutoff for switching to chunked upload. type: string - serverCommand: - description: Specifies the path or command to run a sftp server on the remote - host. + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' type: string - setEnv: - description: Environment variables to pass to sftp and commands + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. type: string - setModtime: - default: true - description: Set the modified time on the remote if set. + useArnRegion: + default: false + description: If true, enables arn region support for the service. type: boolean - sha1sumCommand: - description: The command used to read sha1 hashes. + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. type: string - shellType: - description: The type of SSH shell on remote server, if any. - example: none + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). 
+ type: boolean + useMultipartEtag: + default: unset + description: Whether to use ETag in multipart uploads for verification type: string - skipLinks: + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string + usePresignedRequest: default: false - description: Set to skip any symlinks and any other non regular files. + description: Whether to use a presigned request or PutObject for single part + uploads type: boolean - socksProxy: - description: Socks 5 proxy host. + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject type: string - ssh: - description: Path and arguments to external ssh binary. + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. type: string - subsystem: - default: sftp - description: Specifies the SSH2 subsystem on the remote host. + v2Auth: + default: false + description: If true use v2 authentication. + type: boolean + versionAt: + default: "off" + description: Show file versions as they were at the specified time. type: string - useFstat: + versionDeleted: default: false - description: If set use fstat instead of stat. + description: Show deleted file markers when using versions. type: boolean - useInsecureCipher: + versions: default: false - description: Enable the use of insecure ciphers and key exchange methods. - example: false + description: Include old versions in directory listings. type: boolean - user: - default: $USER - description: SSH username. - type: string type: object - storage.sharefileConfig: + storage.s3RabataConfig: properties: - authUrl: - description: Auth server URL. + accessKeyId: + description: AWS Access Key ID. type: string chunkSize: - default: 64Mi - description: Upload chunk size. - type: string - clientId: - description: OAuth Client Id. + default: 5Mi + description: Chunk size to use for uploading. type: string - clientSecret: - description: OAuth Client Secret. 
+ copyCutoff: + default: 4.656Gi + description: Cutoff for switching to multipart copy. type: string + decompress: + default: false + description: If set this will decompress gzip encoded objects. + type: boolean description: description: Description of the remote. type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean + disableChecksum: + default: false + description: Don't store MD5 checksum with object metadata. + type: boolean + disableHttp2: + default: false + description: Disable usage of http2 for S3 backends. + type: boolean + downloadUrl: + description: Custom endpoint for downloads. + type: string encoding: - default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot + default: Slash,InvalidUtf8,Dot description: The encoding for the backend. type: string endpoint: - description: Endpoint for API calls. - type: string - rootFolderId: - description: ID of the root folder. - example: "" - type: string - token: - description: OAuth Access Token as a JSON blob. - type: string - tokenUrl: - description: Token server url. + description: Endpoint for S3 API. + example: s3.us-east-1.rabata.io type: string - uploadCutoff: - default: 128Mi - description: Cutoff for switching to multipart upload. + envAuth: + default: false + description: Get AWS credentials from runtime (environment variables or EC2/ECS + meta data if no env vars). + example: false + type: boolean + forcePathStyle: + default: true + description: If true use path style access if false use virtual hosted style. + type: boolean + listChunk: + default: 1000 + description: Size of listing chunk (response list for each ListObject S3 request). 
+ type: integer + listUrlEncode: + default: unset + description: 'Whether to url encode listings: true/false/unset' type: string - type: object - storage.siaConfig: - properties: - apiPassword: - description: Sia Daemon API Password. + listVersion: + default: 0 + description: 'Version of ListObjects to use: 1,2 or 0 for auto.' + type: integer + locationConstraint: + description: Location constraint - must be set to match the Region. + example: us-east-1 type: string - apiUrl: - default: http://127.0.0.1:9980 - description: Sia daemon API URL, like http://sia.daemon.host:9980. + maxUploadParts: + default: 10000 + description: Maximum number of parts in a multipart upload. + type: integer + memoryPoolFlushTime: + default: 1m0s + description: How often internal memory buffer pools will be flushed. (no longer + used) type: string - description: - description: Description of the remote. + memoryPoolUseMmap: + default: false + description: Whether to use mmap buffers in internal memory pool. (no longer + used) + type: boolean + mightGzip: + default: unset + description: Set this if the backend might gzip objects. type: string - encoding: - default: Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot - description: The encoding for the backend. + noCheckBucket: + default: false + description: If set, don't attempt to check the bucket exists or create it. + type: boolean + noHead: + default: false + description: If set, don't HEAD uploaded objects to check integrity. + type: boolean + noHeadObject: + default: false + description: If set, do not do HEAD before GET when getting objects. + type: boolean + noSystemMetadata: + default: false + description: Suppress setting and reading of system metadata + type: boolean + profile: + description: Profile to use in the shared credentials file. type: string - userAgent: - default: Sia-Agent - description: Siad User Agent + region: + description: Region to connect to. 
+ example: eu-west-1 type: string - type: object - storage.smbConfig: - properties: - caseInsensitive: - default: true - description: Whether the server is configured to be case-insensitive. - type: boolean - description: - description: Description of the remote. + roleArn: + description: ARN of the IAM role to assume. type: string - domain: - default: WORKGROUP - description: Domain name for NTLM authentication. + roleExternalId: + description: External ID for assumed role. type: string - encoding: - default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot - description: The encoding for the backend. + roleSessionDuration: + description: Session duration for assumed role. type: string - hideSpecialShare: - default: true - description: Hide special shares (e.g. print$) which users aren't supposed - to access. - type: boolean - host: - description: SMB server hostname to connect to. + roleSessionName: + description: Session name for assumed role. type: string - idleTimeout: - default: 1m0s - description: Max time before closing idle connections. + sdkLogMode: + default: "Off" + description: Set to debug the SDK type: string - pass: - description: SMB password. + secretAccessKey: + description: AWS Secret Access Key (password). type: string - port: - default: 445 - description: SMB port number. + sessionToken: + description: An AWS session token. + type: string + sharedCredentialsFile: + description: Path to the shared credentials file. + type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string + uploadConcurrency: + default: 4 + description: Concurrency for multipart uploads and copies. type: integer - spn: - description: Service principal name. + uploadCutoff: + default: 200Mi + description: Cutoff for switching to chunked upload. type: string - user: - default: $USER - description: SMB username. 
+ useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' type: string - type: object - storage.storjExistingConfig: - properties: - accessGrant: - description: Access grant. + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. type: string - description: - description: Description of the remote. + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. type: string - type: object - storage.storjNewConfig: - properties: - apiKey: - description: API key. + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean + useMultipartEtag: + default: unset + description: Whether to use ETag in multipart uploads for verification type: string - description: - description: Description of the remote. + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. type: string - passphrase: - description: Encryption passphrase. + usePresignedRequest: + default: false + description: Whether to use a presigned request or PutObject for single part + uploads + type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject type: string - satelliteAddress: - default: us1.storj.io - description: Satellite address. - example: us1.storj.io + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string + v2Auth: + default: false + description: If true use v2 authentication. + type: boolean + versionAt: + default: "off" + description: Show file versions as they were at the specified time. type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. 
+ type: boolean + versions: + default: false + description: Include old versions in directory listings. + type: boolean type: object - storage.sugarsyncConfig: + storage.s3RackCorpConfig: properties: accessKeyId: - description: Sugarsync Access Key ID. + description: AWS Access Key ID. type: string - appId: - description: Sugarsync App ID. + acl: + description: Canned ACL used when creating buckets and storing or copying + objects. type: string - authorization: - description: Sugarsync authorization. + bucketAcl: + description: Canned ACL used when creating buckets. + example: private type: string - authorizationExpiry: - description: Sugarsync authorization expiry. + chunkSize: + default: 5Mi + description: Chunk size to use for uploading. type: string - deletedId: - description: Sugarsync deleted folder id. + copyCutoff: + default: 4.656Gi + description: Cutoff for switching to multipart copy. type: string + decompress: + default: false + description: If set this will decompress gzip encoded objects. + type: boolean description: description: Description of the remote. type: string - encoding: - default: Slash,Ctl,InvalidUtf8,Dot - description: The encoding for the backend. - type: string - hardDelete: + directoryMarkers: default: false - description: Permanently delete files if true + description: Upload an empty object with a trailing slash when a new directory + is created type: boolean - privateAccessKey: - description: Sugarsync Private Access Key. + disableChecksum: + default: false + description: Don't store MD5 checksum with object metadata. + type: boolean + disableHttp2: + default: false + description: Disable usage of http2 for S3 backends. + type: boolean + downloadUrl: + description: Custom endpoint for downloads. type: string - refreshToken: - description: Sugarsync refresh token. + encoding: + default: Slash,InvalidUtf8,Dot + description: The encoding for the backend. type: string - rootId: - description: Sugarsync root id. 
+ endpoint: + description: Endpoint for S3 API. + example: s3.rackcorp.com type: string - user: - description: Sugarsync user. - type: string - type: object - storage.swiftConfig: - properties: - applicationCredentialId: - description: Application Credential ID (OS_APPLICATION_CREDENTIAL_ID). - type: string - applicationCredentialName: - description: Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME). - type: string - applicationCredentialSecret: - description: Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET). - type: string - auth: - description: Authentication URL for server (OS_AUTH_URL). - example: https://auth.api.rackspacecloud.com/v1.0 - type: string - authToken: - description: Auth Token from alternate authentication - optional (OS_AUTH_TOKEN). + envAuth: + default: false + description: Get AWS credentials from runtime (environment variables or EC2/ECS + meta data if no env vars). + example: false + type: boolean + forcePathStyle: + default: true + description: If true use path style access if false use virtual hosted style. + type: boolean + listChunk: + default: 1000 + description: Size of listing chunk (response list for each ListObject S3 request). + type: integer + listUrlEncode: + default: unset + description: 'Whether to url encode listings: true/false/unset' type: string - authVersion: + listVersion: default: 0 - description: AuthVersion - optional - set to (1,2,3) if your auth URL has - no version (ST_AUTH_VERSION). + description: 'Version of ListObjects to use: 1,2 or 0 for auto.' type: integer - chunkSize: - default: 5Gi - description: Above this size files will be chunked. - type: string - description: - description: Description of the remote. - type: string - domain: - description: User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME) - type: string - encoding: - default: Slash,InvalidUtf8 - description: The encoding for the backend. 
+ locationConstraint: + description: Location constraint - must be set to match the Region. + example: global type: string - endpointType: - default: public - description: Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE). - example: public + maxUploadParts: + default: 10000 + description: Maximum number of parts in a multipart upload. + type: integer + memoryPoolFlushTime: + default: 1m0s + description: How often internal memory buffer pools will be flushed. (no longer + used) type: string - envAuth: + memoryPoolUseMmap: default: false - description: Get swift credentials from environment variables in standard - OpenStack form. - example: false + description: Whether to use mmap buffers in internal memory pool. (no longer + used) type: boolean - fetchUntilEmptyPage: + mightGzip: + default: unset + description: Set this if the backend might gzip objects. + type: string + noCheckBucket: default: false - description: When paginating, always fetch unless we received an empty page. + description: If set, don't attempt to check the bucket exists or create it. type: boolean - key: - description: API key or password (OS_PASSWORD). - type: string - leavePartsOnError: + noHead: default: false - description: If true avoid calling abort upload on a failure. + description: If set, don't HEAD uploaded objects to check integrity. type: boolean - noChunk: + noHeadObject: default: false - description: Don't chunk files during streaming upload. + description: If set, do not do HEAD before GET when getting objects. type: boolean - noLargeObjects: + noSystemMetadata: default: false - description: Disable support for static and dynamic large objects + description: Suppress setting and reading of system metadata type: boolean - partialPageFetchThreshold: - default: 0 - description: When paginating, fetch if the current page is within this percentage - of the limit. - type: integer + profile: + description: Profile to use in the shared credentials file. 
+ type: string region: - description: Region name - optional (OS_REGION_NAME). + description: Region to connect to. + example: global type: string - storagePolicy: - description: The storage policy to use when creating a new container. - example: "" + roleArn: + description: ARN of the IAM role to assume. type: string - storageUrl: - description: Storage URL - optional (OS_STORAGE_URL). + roleExternalId: + description: External ID for assumed role. type: string - tenant: - description: Tenant name - optional for v1 auth, this or tenant_id required - otherwise (OS_TENANT_NAME or OS_PROJECT_NAME). + roleSessionDuration: + description: Session duration for assumed role. type: string - tenantDomain: - description: Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME). + roleSessionName: + description: Session name for assumed role. type: string - tenantId: - description: Tenant ID - optional for v1 auth, this or tenant required otherwise - (OS_TENANT_ID). + sdkLogMode: + default: "Off" + description: Set to debug the SDK type: string - useSegmentsContainer: - default: unset - description: Choose destination for large object segments + secretAccessKey: + description: AWS Secret Access Key (password). type: string - user: - description: User name to log in (OS_USERNAME). + sessionToken: + description: An AWS session token. type: string - userId: - description: User ID to log in - optional - most swift systems use user and - leave this blank (v3 auth) (OS_USER_ID). + sharedCredentialsFile: + description: Path to the shared credentials file. type: string - type: object - storage.unionConfig: - properties: - actionPolicy: - default: epall - description: Policy to choose upstream on ACTION category. + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. type: string - cacheTime: - default: 120 - description: Cache time of usage and free space (in seconds). 
+ uploadConcurrency: + default: 4 + description: Concurrency for multipart uploads and copies. type: integer - createPolicy: - default: epmfs - description: Policy to choose upstream on CREATE category. + uploadCutoff: + default: 200Mi + description: Cutoff for switching to chunked upload. type: string - description: - description: Description of the remote. + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' type: string - minFreeSpace: - default: 1Gi - description: Minimum viable free space for lfs/eplfs policies. + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. type: string - searchPolicy: - default: ff - description: Policy to choose upstream on SEARCH category. + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. type: string - upstreams: - description: List of space separated upstreams. + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean + useMultipartEtag: + default: unset + description: Whether to use ETag in multipart uploads for verification type: string - type: object - storage.uptoboxConfig: - properties: - accessToken: - description: Your access token. + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. type: string - description: - description: Description of the remote. 
+ usePresignedRequest: + default: false + description: Whether to use a presigned request or PutObject for single part + uploads + type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject type: string - encoding: - default: Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot - description: The encoding for the backend. + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string + v2Auth: + default: false + description: If true use v2 authentication. + type: boolean + versionAt: + default: "off" + description: Show file versions as they were at the specified time. type: string - private: + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean + versions: default: false - description: Set to make uploaded files private + description: Include old versions in directory listings. type: boolean type: object - storage.webdavConfig: + storage.s3RcloneConfig: properties: - bearerToken: - description: Bearer token instead of user/pass (e.g. a Macaroon). + accessKeyId: + description: AWS Access Key ID. type: string - bearerTokenCommand: - description: Command to run to get a bearer token. + chunkSize: + default: 5Mi + description: Chunk size to use for uploading. + type: string + copyCutoff: + default: 4.656Gi + description: Cutoff for switching to multipart copy. type: string + decompress: + default: false + description: If set this will decompress gzip encoded objects. + type: boolean description: description: Description of the remote. type: string - encoding: - description: The encoding for the backend. - type: string - headers: - description: Set HTTP headers for all transactions. - type: string - nextcloudChunkSize: - default: 10Mi - description: Nextcloud upload chunk size. 
- type: string - owncloudExcludeMounts: + directoryMarkers: default: false - description: Exclude ownCloud mounted storages + description: Upload an empty object with a trailing slash when a new directory + is created type: boolean - owncloudExcludeShares: + disableChecksum: default: false - description: Exclude ownCloud shares + description: Don't store MD5 checksum with object metadata. type: boolean - pacerMinSleep: - default: 10ms - description: Minimum time to sleep between API calls. + disableHttp2: + default: false + description: Disable usage of http2 for S3 backends. + type: boolean + downloadUrl: + description: Custom endpoint for downloads. type: string - pass: - description: Password. + encoding: + default: Slash,InvalidUtf8,Dot + description: The encoding for the backend. type: string - unixSocket: - description: Path to a unix domain socket to dial to, instead of opening a - TCP connection directly + endpoint: + description: Endpoint for S3 API. type: string - url: - description: URL of http host to connect to. + envAuth: + default: false + description: Get AWS credentials from runtime (environment variables or EC2/ECS + meta data if no env vars). + example: false + type: boolean + forcePathStyle: + default: true + description: If true use path style access if false use virtual hosted style. + type: boolean + listChunk: + default: 1000 + description: Size of listing chunk (response list for each ListObject S3 request). + type: integer + listUrlEncode: + default: unset + description: 'Whether to url encode listings: true/false/unset' type: string - user: - description: User name. + listVersion: + default: 0 + description: 'Version of ListObjects to use: 1,2 or 0 for auto.' + type: integer + maxUploadParts: + default: 10000 + description: Maximum number of parts in a multipart upload. + type: integer + memoryPoolFlushTime: + default: 1m0s + description: How often internal memory buffer pools will be flushed. 
(no longer + used) type: string - vendor: - description: Name of the WebDAV site/service/software you are using. - example: fastmail + memoryPoolUseMmap: + default: false + description: Whether to use mmap buffers in internal memory pool. (no longer + used) + type: boolean + mightGzip: + default: unset + description: Set this if the backend might gzip objects. type: string - type: object - storage.yandexConfig: - properties: - authUrl: - description: Auth server URL. + noCheckBucket: + default: false + description: If set, don't attempt to check the bucket exists or create it. + type: boolean + noHead: + default: false + description: If set, don't HEAD uploaded objects to check integrity. + type: boolean + noHeadObject: + default: false + description: If set, do not do HEAD before GET when getting objects. + type: boolean + noSystemMetadata: + default: false + description: Suppress setting and reading of system metadata + type: boolean + profile: + description: Profile to use in the shared credentials file. type: string - clientId: - description: OAuth Client Id. + roleArn: + description: ARN of the IAM role to assume. type: string - clientSecret: - description: OAuth Client Secret. + roleExternalId: + description: External ID for assumed role. type: string - description: - description: Description of the remote. + roleSessionDuration: + description: Session duration for assumed role. type: string - encoding: - default: Slash,Del,Ctl,InvalidUtf8,Dot - description: The encoding for the backend. + roleSessionName: + description: Session name for assumed role. type: string - hardDelete: + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string + secretAccessKey: + description: AWS Secret Access Key (password). + type: string + sessionToken: + description: An AWS session token. + type: string + sharedCredentialsFile: + description: Path to the shared credentials file. 
+ type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string + uploadConcurrency: + default: 4 + description: Concurrency for multipart uploads and copies. + type: integer + uploadCutoff: + default: 200Mi + description: Cutoff for switching to chunked upload. + type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useArnRegion: default: false - description: Delete files permanently rather than putting them into the trash. + description: If true, enables arn region support for the service. type: boolean - spoofUa: - default: true - description: Set the user agent to match an official version of the yandex - disk client. May help with upload performance. + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). type: boolean - token: - description: OAuth Access Token as a JSON blob. + useMultipartEtag: + default: unset + description: Whether to use ETag in multipart uploads for verification type: string - tokenUrl: - description: Token server url. + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string + usePresignedRequest: + default: false + description: Whether to use a presigned request or PutObject for single part + uploads + type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. 
type: string + v2Auth: + default: false + description: If true use v2 authentication. + type: boolean + versionAt: + default: "off" + description: Show file versions as they were at the specified time. + type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean + versions: + default: false + description: Include old versions in directory listings. + type: boolean type: object - storage.zohoConfig: + storage.s3ScalewayConfig: properties: - authUrl: - description: Auth server URL. + accessKeyId: + description: AWS Access Key ID. type: string - clientId: - description: OAuth Client Id. + acl: + description: Canned ACL used when creating buckets and storing or copying + objects. type: string - clientSecret: - description: OAuth Client Secret. + bucketAcl: + description: Canned ACL used when creating buckets. + example: private type: string + chunkSize: + default: 5Mi + description: Chunk size to use for uploading. + type: string + copyCutoff: + default: 4.656Gi + description: Cutoff for switching to multipart copy. + type: string + decompress: + default: false + description: If set this will decompress gzip encoded objects. + type: boolean description: description: Description of the remote. type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean + disableChecksum: + default: false + description: Don't store MD5 checksum with object metadata. + type: boolean + disableHttp2: + default: false + description: Disable usage of http2 for S3 backends. + type: boolean + downloadUrl: + description: Custom endpoint for downloads. + type: string encoding: - default: Del,Ctl,InvalidUtf8 + default: Slash,InvalidUtf8,Dot description: The encoding for the backend. type: string - region: - description: Zoho region to connect to. - example: com + endpoint: + description: Endpoint for S3 API. 
+ example: s3.nl-ams.scw.cloud + type: string + envAuth: + default: false + description: Get AWS credentials from runtime (environment variables or EC2/ECS + meta data if no env vars). + example: false + type: boolean + forcePathStyle: + default: true + description: If true use path style access if false use virtual hosted style. + type: boolean + listChunk: + default: 1000 + description: Size of listing chunk (response list for each ListObject S3 request). + type: integer + listUrlEncode: + default: unset + description: 'Whether to url encode listings: true/false/unset' + type: string + listVersion: + default: 0 + description: 'Version of ListObjects to use: 1,2 or 0 for auto.' + type: integer + maxUploadParts: + default: 10000 + description: Maximum number of parts in a multipart upload. + type: integer + memoryPoolFlushTime: + default: 1m0s + description: How often internal memory buffer pools will be flushed. (no longer + used) + type: string + memoryPoolUseMmap: + default: false + description: Whether to use mmap buffers in internal memory pool. (no longer + used) + type: boolean + mightGzip: + default: unset + description: Set this if the backend might gzip objects. + type: string + noCheckBucket: + default: false + description: If set, don't attempt to check the bucket exists or create it. + type: boolean + noHead: + default: false + description: If set, don't HEAD uploaded objects to check integrity. + type: boolean + noHeadObject: + default: false + description: If set, do not do HEAD before GET when getting objects. + type: boolean + noSystemMetadata: + default: false + description: Suppress setting and reading of system metadata + type: boolean + profile: + description: Profile to use in the shared credentials file. + type: string + region: + description: Region to connect to. + example: nl-ams + type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. 
+ type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. + type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string + secretAccessKey: + description: AWS Secret Access Key (password). + type: string + sessionToken: + description: An AWS session token. + type: string + sharedCredentialsFile: + description: Path to the shared credentials file. + type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string + storageClass: + description: The storage class to use when storing new objects in S3. + example: "" + type: string + uploadConcurrency: + default: 4 + description: Concurrency for multipart uploads and copies. + type: integer + uploadCutoff: + default: 200Mi + description: Cutoff for switching to chunked upload. + type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean + useMultipartEtag: + default: unset + description: Whether to use ETag in multipart uploads for verification + type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. 
+ type: string + usePresignedRequest: + default: false + description: Whether to use a presigned request or PutObject for single part + uploads + type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string + v2Auth: + default: false + description: If true use v2 authentication. + type: boolean + versionAt: + default: "off" + description: Show file versions as they were at the specified time. + type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean + versions: + default: false + description: Include old versions in directory listings. + type: boolean + type: object + storage.s3SeaweedFSConfig: + properties: + accessKeyId: + description: AWS Access Key ID. + type: string + acl: + description: Canned ACL used when creating buckets and storing or copying + objects. + type: string + bucketAcl: + description: Canned ACL used when creating buckets. + example: private + type: string + chunkSize: + default: 5Mi + description: Chunk size to use for uploading. + type: string + copyCutoff: + default: 4.656Gi + description: Cutoff for switching to multipart copy. + type: string + decompress: + default: false + description: If set this will decompress gzip encoded objects. + type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean + disableChecksum: + default: false + description: Don't store MD5 checksum with object metadata. + type: boolean + disableHttp2: + default: false + description: Disable usage of http2 for S3 backends. + type: boolean + downloadUrl: + description: Custom endpoint for downloads. 
+ type: string + encoding: + default: Slash,InvalidUtf8,Dot + description: The encoding for the backend. + type: string + endpoint: + description: Endpoint for S3 API. + example: localhost:8333 + type: string + envAuth: + default: false + description: Get AWS credentials from runtime (environment variables or EC2/ECS + meta data if no env vars). + example: false + type: boolean + forcePathStyle: + default: true + description: If true use path style access if false use virtual hosted style. + type: boolean + listChunk: + default: 1000 + description: Size of listing chunk (response list for each ListObject S3 request). + type: integer + listUrlEncode: + default: unset + description: 'Whether to url encode listings: true/false/unset' + type: string + listVersion: + default: 0 + description: 'Version of ListObjects to use: 1,2 or 0 for auto.' + type: integer + locationConstraint: + description: Location constraint - must be set to match the Region. + type: string + maxUploadParts: + default: 10000 + description: Maximum number of parts in a multipart upload. + type: integer + memoryPoolFlushTime: + default: 1m0s + description: How often internal memory buffer pools will be flushed. (no longer + used) + type: string + memoryPoolUseMmap: + default: false + description: Whether to use mmap buffers in internal memory pool. (no longer + used) + type: boolean + mightGzip: + default: unset + description: Set this if the backend might gzip objects. + type: string + noCheckBucket: + default: false + description: If set, don't attempt to check the bucket exists or create it. + type: boolean + noHead: + default: false + description: If set, don't HEAD uploaded objects to check integrity. + type: boolean + noHeadObject: + default: false + description: If set, do not do HEAD before GET when getting objects. 
+ type: boolean + noSystemMetadata: + default: false + description: Suppress setting and reading of system metadata + type: boolean + profile: + description: Profile to use in the shared credentials file. + type: string + region: + description: Region to connect to. + type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. + type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string + secretAccessKey: + description: AWS Secret Access Key (password). + type: string + sessionToken: + description: An AWS session token. + type: string + sharedCredentialsFile: + description: Path to the shared credentials file. + type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string + uploadConcurrency: + default: 4 + description: Concurrency for multipart uploads and copies. + type: integer + uploadCutoff: + default: 200Mi + description: Cutoff for switching to chunked upload. + type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). 
+ type: boolean + useMultipartEtag: + default: unset + description: Whether to use ETag in multipart uploads for verification + type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string + usePresignedRequest: + default: false + description: Whether to use a presigned request or PutObject for single part + uploads + type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string + v2Auth: + default: false + description: If true use v2 authentication. + type: boolean + versionAt: + default: "off" + description: Show file versions as they were at the specified time. + type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean + versions: + default: false + description: Include old versions in directory listings. + type: boolean + type: object + storage.s3SelectelConfig: + properties: + accessKeyId: + description: AWS Access Key ID. + type: string + chunkSize: + default: 5Mi + description: Chunk size to use for uploading. + type: string + copyCutoff: + default: 4.656Gi + description: Cutoff for switching to multipart copy. + type: string + decompress: + default: false + description: If set this will decompress gzip encoded objects. + type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean + disableChecksum: + default: false + description: Don't store MD5 checksum with object metadata. + type: boolean + disableHttp2: + default: false + description: Disable usage of http2 for S3 backends. + type: boolean + downloadUrl: + description: Custom endpoint for downloads. 
+ type: string + encoding: + default: Slash,InvalidUtf8,Dot + description: The encoding for the backend. + type: string + endpoint: + description: Endpoint for S3 API. + example: s3.ru-1.storage.selcloud.ru + type: string + envAuth: + default: false + description: Get AWS credentials from runtime (environment variables or EC2/ECS + meta data if no env vars). + example: false + type: boolean + forcePathStyle: + default: true + description: If true use path style access if false use virtual hosted style. + type: boolean + listChunk: + default: 1000 + description: Size of listing chunk (response list for each ListObject S3 request). + type: integer + listUrlEncode: + default: unset + description: 'Whether to url encode listings: true/false/unset' + type: string + listVersion: + default: 0 + description: 'Version of ListObjects to use: 1,2 or 0 for auto.' + type: integer + maxUploadParts: + default: 10000 + description: Maximum number of parts in a multipart upload. + type: integer + memoryPoolFlushTime: + default: 1m0s + description: How often internal memory buffer pools will be flushed. (no longer + used) + type: string + memoryPoolUseMmap: + default: false + description: Whether to use mmap buffers in internal memory pool. (no longer + used) + type: boolean + mightGzip: + default: unset + description: Set this if the backend might gzip objects. + type: string + noCheckBucket: + default: false + description: If set, don't attempt to check the bucket exists or create it. + type: boolean + noHead: + default: false + description: If set, don't HEAD uploaded objects to check integrity. + type: boolean + noHeadObject: + default: false + description: If set, do not do HEAD before GET when getting objects. + type: boolean + noSystemMetadata: + default: false + description: Suppress setting and reading of system metadata + type: boolean + profile: + description: Profile to use in the shared credentials file. + type: string + region: + description: Region to connect to. 
+ example: ru-3 + type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. + type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string + secretAccessKey: + description: AWS Secret Access Key (password). + type: string + sessionToken: + description: An AWS session token. + type: string + sharedCredentialsFile: + description: Path to the shared credentials file. + type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string + uploadConcurrency: + default: 4 + description: Concurrency for multipart uploads and copies. + type: integer + uploadCutoff: + default: 200Mi + description: Cutoff for switching to chunked upload. + type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean + useMultipartEtag: + default: unset + description: Whether to use ETag in multipart uploads for verification + type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. 
+ type: string + usePresignedRequest: + default: false + description: Whether to use a presigned request or PutObject for single part + uploads + type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string + v2Auth: + default: false + description: If true use v2 authentication. + type: boolean + versionAt: + default: "off" + description: Show file versions as they were at the specified time. + type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean + versions: + default: false + description: Include old versions in directory listings. + type: boolean + type: object + storage.s3ServercoreConfig: + properties: + accessKeyId: + description: AWS Access Key ID. + type: string + bucketAcl: + description: Canned ACL used when creating buckets. + example: private + type: string + chunkSize: + default: 5Mi + description: Chunk size to use for uploading. + type: string + copyCutoff: + default: 4.656Gi + description: Cutoff for switching to multipart copy. + type: string + decompress: + default: false + description: If set this will decompress gzip encoded objects. + type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean + disableChecksum: + default: false + description: Don't store MD5 checksum with object metadata. + type: boolean + disableHttp2: + default: false + description: Disable usage of http2 for S3 backends. + type: boolean + downloadUrl: + description: Custom endpoint for downloads. + type: string + encoding: + default: Slash,InvalidUtf8,Dot + description: The encoding for the backend. + type: string + endpoint: + description: Endpoint for S3 API. 
+ example: s3.ru-1.storage.selcloud.ru + type: string + envAuth: + default: false + description: Get AWS credentials from runtime (environment variables or EC2/ECS + meta data if no env vars). + example: false + type: boolean + forcePathStyle: + default: true + description: If true use path style access if false use virtual hosted style. + type: boolean + listChunk: + default: 1000 + description: Size of listing chunk (response list for each ListObject S3 request). + type: integer + listUrlEncode: + default: unset + description: 'Whether to url encode listings: true/false/unset' + type: string + listVersion: + default: 0 + description: 'Version of ListObjects to use: 1,2 or 0 for auto.' + type: integer + maxUploadParts: + default: 10000 + description: Maximum number of parts in a multipart upload. + type: integer + memoryPoolFlushTime: + default: 1m0s + description: How often internal memory buffer pools will be flushed. (no longer + used) + type: string + memoryPoolUseMmap: + default: false + description: Whether to use mmap buffers in internal memory pool. (no longer + used) + type: boolean + mightGzip: + default: unset + description: Set this if the backend might gzip objects. + type: string + noCheckBucket: + default: false + description: If set, don't attempt to check the bucket exists or create it. + type: boolean + noHead: + default: false + description: If set, don't HEAD uploaded objects to check integrity. + type: boolean + noHeadObject: + default: false + description: If set, do not do HEAD before GET when getting objects. + type: boolean + noSystemMetadata: + default: false + description: Suppress setting and reading of system metadata + type: boolean + profile: + description: Profile to use in the shared credentials file. + type: string + region: + description: Region to connect to. + example: uz-2 + type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. 
+ type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. + type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string + secretAccessKey: + description: AWS Secret Access Key (password). + type: string + sessionToken: + description: An AWS session token. + type: string + sharedCredentialsFile: + description: Path to the shared credentials file. + type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string + uploadConcurrency: + default: 4 + description: Concurrency for multipart uploads and copies. + type: integer + uploadCutoff: + default: 200Mi + description: Cutoff for switching to chunked upload. + type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean + useMultipartEtag: + default: unset + description: Whether to use ETag in multipart uploads for verification + type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. 
+ type: string + usePresignedRequest: + default: false + description: Whether to use a presigned request or PutObject for single part + uploads + type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string + v2Auth: + default: false + description: If true use v2 authentication. + type: boolean + versionAt: + default: "off" + description: Show file versions as they were at the specified time. + type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean + versions: + default: false + description: Include old versions in directory listings. + type: boolean + type: object + storage.s3SpectraLogicConfig: + properties: + accessKeyId: + description: AWS Access Key ID. + type: string + chunkSize: + default: 5Mi + description: Chunk size to use for uploading. + type: string + copyCutoff: + default: 4.656Gi + description: Cutoff for switching to multipart copy. + type: string + decompress: + default: false + description: If set this will decompress gzip encoded objects. + type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean + disableChecksum: + default: false + description: Don't store MD5 checksum with object metadata. + type: boolean + disableHttp2: + default: false + description: Disable usage of http2 for S3 backends. + type: boolean + downloadUrl: + description: Custom endpoint for downloads. + type: string + encoding: + default: Slash,InvalidUtf8,Dot + description: The encoding for the backend. + type: string + endpoint: + description: Endpoint for S3 API. 
+ type: string + envAuth: + default: false + description: Get AWS credentials from runtime (environment variables or EC2/ECS + meta data if no env vars). + example: false + type: boolean + forcePathStyle: + default: true + description: If true use path style access if false use virtual hosted style. + type: boolean + listChunk: + default: 1000 + description: Size of listing chunk (response list for each ListObject S3 request). + type: integer + listUrlEncode: + default: unset + description: 'Whether to url encode listings: true/false/unset' + type: string + listVersion: + default: 0 + description: 'Version of ListObjects to use: 1,2 or 0 for auto.' + type: integer + maxUploadParts: + default: 10000 + description: Maximum number of parts in a multipart upload. + type: integer + memoryPoolFlushTime: + default: 1m0s + description: How often internal memory buffer pools will be flushed. (no longer + used) + type: string + memoryPoolUseMmap: + default: false + description: Whether to use mmap buffers in internal memory pool. (no longer + used) + type: boolean + mightGzip: + default: unset + description: Set this if the backend might gzip objects. + type: string + noCheckBucket: + default: false + description: If set, don't attempt to check the bucket exists or create it. + type: boolean + noHead: + default: false + description: If set, don't HEAD uploaded objects to check integrity. + type: boolean + noHeadObject: + default: false + description: If set, do not do HEAD before GET when getting objects. + type: boolean + noSystemMetadata: + default: false + description: Suppress setting and reading of system metadata + type: boolean + profile: + description: Profile to use in the shared credentials file. + type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. 
+ type: string + roleSessionName: + description: Session name for assumed role. + type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string + secretAccessKey: + description: AWS Secret Access Key (password). + type: string + sessionToken: + description: An AWS session token. + type: string + sharedCredentialsFile: + description: Path to the shared credentials file. + type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string + uploadConcurrency: + default: 4 + description: Concurrency for multipart uploads and copies. + type: integer + uploadCutoff: + default: 200Mi + description: Cutoff for switching to chunked upload. + type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean + useMultipartEtag: + default: unset + description: Whether to use ETag in multipart uploads for verification + type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string + usePresignedRequest: + default: false + description: Whether to use a presigned request or PutObject for single part + uploads + type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. 
+ type: string + v2Auth: + default: false + description: If true use v2 authentication. + type: boolean + versionAt: + default: "off" + description: Show file versions as they were at the specified time. + type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean + versions: + default: false + description: Include old versions in directory listings. + type: boolean + type: object + storage.s3StackPathConfig: + properties: + accessKeyId: + description: AWS Access Key ID. + type: string + acl: + description: Canned ACL used when creating buckets and storing or copying + objects. + type: string + bucketAcl: + description: Canned ACL used when creating buckets. + example: private + type: string + chunkSize: + default: 5Mi + description: Chunk size to use for uploading. + type: string + copyCutoff: + default: 4.656Gi + description: Cutoff for switching to multipart copy. + type: string + decompress: + default: false + description: If set this will decompress gzip encoded objects. + type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean + disableChecksum: + default: false + description: Don't store MD5 checksum with object metadata. + type: boolean + disableHttp2: + default: false + description: Disable usage of http2 for S3 backends. + type: boolean + downloadUrl: + description: Custom endpoint for downloads. + type: string + encoding: + default: Slash,InvalidUtf8,Dot + description: The encoding for the backend. + type: string + endpoint: + description: Endpoint for S3 API. + example: s3.us-east-2.stackpathstorage.com + type: string + envAuth: + default: false + description: Get AWS credentials from runtime (environment variables or EC2/ECS + meta data if no env vars). 
+ example: false + type: boolean + forcePathStyle: + default: true + description: If true use path style access if false use virtual hosted style. + type: boolean + listChunk: + default: 1000 + description: Size of listing chunk (response list for each ListObject S3 request). + type: integer + listUrlEncode: + default: unset + description: 'Whether to url encode listings: true/false/unset' + type: string + listVersion: + default: 0 + description: 'Version of ListObjects to use: 1,2 or 0 for auto.' + type: integer + maxUploadParts: + default: 10000 + description: Maximum number of parts in a multipart upload. + type: integer + memoryPoolFlushTime: + default: 1m0s + description: How often internal memory buffer pools will be flushed. (no longer + used) + type: string + memoryPoolUseMmap: + default: false + description: Whether to use mmap buffers in internal memory pool. (no longer + used) + type: boolean + mightGzip: + default: unset + description: Set this if the backend might gzip objects. + type: string + noCheckBucket: + default: false + description: If set, don't attempt to check the bucket exists or create it. + type: boolean + noHead: + default: false + description: If set, don't HEAD uploaded objects to check integrity. + type: boolean + noHeadObject: + default: false + description: If set, do not do HEAD before GET when getting objects. + type: boolean + noSystemMetadata: + default: false + description: Suppress setting and reading of system metadata + type: boolean + profile: + description: Profile to use in the shared credentials file. + type: string + region: + description: Region to connect to. + type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. 
+ type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string + secretAccessKey: + description: AWS Secret Access Key (password). + type: string + sessionToken: + description: An AWS session token. + type: string + sharedCredentialsFile: + description: Path to the shared credentials file. + type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string + uploadConcurrency: + default: 4 + description: Concurrency for multipart uploads and copies. + type: integer + uploadCutoff: + default: 200Mi + description: Cutoff for switching to chunked upload. + type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean + useMultipartEtag: + default: unset + description: Whether to use ETag in multipart uploads for verification + type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string + usePresignedRequest: + default: false + description: Whether to use a presigned request or PutObject for single part + uploads + type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string + v2Auth: + default: false + description: If true use v2 authentication. 
+ type: boolean + versionAt: + default: "off" + description: Show file versions as they were at the specified time. + type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean + versions: + default: false + description: Include old versions in directory listings. + type: boolean + type: object + storage.s3StorjConfig: + properties: + accessKeyId: + description: AWS Access Key ID. + type: string + chunkSize: + default: 5Mi + description: Chunk size to use for uploading. + type: string + copyCutoff: + default: 4.656Gi + description: Cutoff for switching to multipart copy. + type: string + decompress: + default: false + description: If set this will decompress gzip encoded objects. + type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean + disableChecksum: + default: false + description: Don't store MD5 checksum with object metadata. + type: boolean + disableHttp2: + default: false + description: Disable usage of http2 for S3 backends. + type: boolean + downloadUrl: + description: Custom endpoint for downloads. + type: string + encoding: + default: Slash,InvalidUtf8,Dot + description: The encoding for the backend. + type: string + endpoint: + description: Endpoint for S3 API. + example: gateway.storjshare.io + type: string + envAuth: + default: false + description: Get AWS credentials from runtime (environment variables or EC2/ECS + meta data if no env vars). + example: false + type: boolean + forcePathStyle: + default: true + description: If true use path style access if false use virtual hosted style. + type: boolean + listChunk: + default: 1000 + description: Size of listing chunk (response list for each ListObject S3 request). 
+ type: integer + listUrlEncode: + default: unset + description: 'Whether to url encode listings: true/false/unset' + type: string + listVersion: + default: 0 + description: 'Version of ListObjects to use: 1,2 or 0 for auto.' + type: integer + maxUploadParts: + default: 10000 + description: Maximum number of parts in a multipart upload. + type: integer + memoryPoolFlushTime: + default: 1m0s + description: How often internal memory buffer pools will be flushed. (no longer + used) + type: string + memoryPoolUseMmap: + default: false + description: Whether to use mmap buffers in internal memory pool. (no longer + used) + type: boolean + mightGzip: + default: unset + description: Set this if the backend might gzip objects. + type: string + noCheckBucket: + default: false + description: If set, don't attempt to check the bucket exists or create it. + type: boolean + noHead: + default: false + description: If set, don't HEAD uploaded objects to check integrity. + type: boolean + noHeadObject: + default: false + description: If set, do not do HEAD before GET when getting objects. + type: boolean + noSystemMetadata: + default: false + description: Suppress setting and reading of system metadata + type: boolean + profile: + description: Profile to use in the shared credentials file. + type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. + type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string + secretAccessKey: + description: AWS Secret Access Key (password). + type: string + sessionToken: + description: An AWS session token. + type: string + sharedCredentialsFile: + description: Path to the shared credentials file. 
+ type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string + uploadConcurrency: + default: 4 + description: Concurrency for multipart uploads and copies. + type: integer + uploadCutoff: + default: 200Mi + description: Cutoff for switching to chunked upload. + type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean + useMultipartEtag: + default: unset + description: Whether to use ETag in multipart uploads for verification + type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string + usePresignedRequest: + default: false + description: Whether to use a presigned request or PutObject for single part + uploads + type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string + v2Auth: + default: false + description: If true use v2 authentication. + type: boolean + versionAt: + default: "off" + description: Show file versions as they were at the specified time. + type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. 
+ type: boolean + versions: + default: false + description: Include old versions in directory listings. + type: boolean + type: object + storage.s3SynologyConfig: + properties: + accessKeyId: + description: AWS Access Key ID. + type: string + chunkSize: + default: 5Mi + description: Chunk size to use for uploading. + type: string + copyCutoff: + default: 4.656Gi + description: Cutoff for switching to multipart copy. + type: string + decompress: + default: false + description: If set this will decompress gzip encoded objects. + type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean + disableChecksum: + default: false + description: Don't store MD5 checksum with object metadata. + type: boolean + disableHttp2: + default: false + description: Disable usage of http2 for S3 backends. + type: boolean + downloadUrl: + description: Custom endpoint for downloads. + type: string + encoding: + default: Slash,InvalidUtf8,Dot + description: The encoding for the backend. + type: string + endpoint: + description: Endpoint for S3 API. + example: eu-001.s3.synologyc2.net + type: string + envAuth: + default: false + description: Get AWS credentials from runtime (environment variables or EC2/ECS + meta data if no env vars). + example: false + type: boolean + forcePathStyle: + default: true + description: If true use path style access if false use virtual hosted style. + type: boolean + listChunk: + default: 1000 + description: Size of listing chunk (response list for each ListObject S3 request). + type: integer + listUrlEncode: + default: unset + description: 'Whether to url encode listings: true/false/unset' + type: string + listVersion: + default: 0 + description: 'Version of ListObjects to use: 1,2 or 0 for auto.' 
+ type: integer + locationConstraint: + description: Location constraint - must be set to match the Region. + type: string + maxUploadParts: + default: 10000 + description: Maximum number of parts in a multipart upload. + type: integer + memoryPoolFlushTime: + default: 1m0s + description: How often internal memory buffer pools will be flushed. (no longer + used) + type: string + memoryPoolUseMmap: + default: false + description: Whether to use mmap buffers in internal memory pool. (no longer + used) + type: boolean + mightGzip: + default: unset + description: Set this if the backend might gzip objects. + type: string + noCheckBucket: + default: false + description: If set, don't attempt to check the bucket exists or create it. + type: boolean + noHead: + default: false + description: If set, don't HEAD uploaded objects to check integrity. + type: boolean + noHeadObject: + default: false + description: If set, do not do HEAD before GET when getting objects. + type: boolean + noSystemMetadata: + default: false + description: Suppress setting and reading of system metadata + type: boolean + profile: + description: Profile to use in the shared credentials file. + type: string + region: + description: Region to connect to. + example: eu-001 + type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. + type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string + secretAccessKey: + description: AWS Secret Access Key (password). + type: string + sessionToken: + description: An AWS session token. + type: string + sharedCredentialsFile: + description: Path to the shared credentials file. 
+ type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string + uploadConcurrency: + default: 4 + description: Concurrency for multipart uploads and copies. + type: integer + uploadCutoff: + default: 200Mi + description: Cutoff for switching to chunked upload. + type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean + useMultipartEtag: + default: unset + description: Whether to use ETag in multipart uploads for verification + type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string + usePresignedRequest: + default: false + description: Whether to use a presigned request or PutObject for single part + uploads + type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string + v2Auth: + default: false + description: If true use v2 authentication. + type: boolean + versionAt: + default: "off" + description: Show file versions as they were at the specified time. + type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. 
+ type: boolean + versions: + default: false + description: Include old versions in directory listings. + type: boolean + type: object + storage.s3TencentCOSConfig: + properties: + accessKeyId: + description: AWS Access Key ID. + type: string + acl: + description: Canned ACL used when creating buckets and storing or copying + objects. + example: default + type: string + bucketAcl: + description: Canned ACL used when creating buckets. + example: private + type: string + chunkSize: + default: 5Mi + description: Chunk size to use for uploading. + type: string + copyCutoff: + default: 4.656Gi + description: Cutoff for switching to multipart copy. + type: string + decompress: + default: false + description: If set this will decompress gzip encoded objects. + type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean + disableChecksum: + default: false + description: Don't store MD5 checksum with object metadata. + type: boolean + disableHttp2: + default: false + description: Disable usage of http2 for S3 backends. + type: boolean + downloadUrl: + description: Custom endpoint for downloads. + type: string + encoding: + default: Slash,InvalidUtf8,Dot + description: The encoding for the backend. + type: string + endpoint: + description: Endpoint for S3 API. + example: cos.ap-beijing.myqcloud.com + type: string + envAuth: + default: false + description: Get AWS credentials from runtime (environment variables or EC2/ECS + meta data if no env vars). + example: false + type: boolean + forcePathStyle: + default: true + description: If true use path style access if false use virtual hosted style. + type: boolean + listChunk: + default: 1000 + description: Size of listing chunk (response list for each ListObject S3 request). 
+ type: integer + listUrlEncode: + default: unset + description: 'Whether to url encode listings: true/false/unset' + type: string + listVersion: + default: 0 + description: 'Version of ListObjects to use: 1,2 or 0 for auto.' + type: integer + maxUploadParts: + default: 10000 + description: Maximum number of parts in a multipart upload. + type: integer + memoryPoolFlushTime: + default: 1m0s + description: How often internal memory buffer pools will be flushed. (no longer + used) + type: string + memoryPoolUseMmap: + default: false + description: Whether to use mmap buffers in internal memory pool. (no longer + used) + type: boolean + mightGzip: + default: unset + description: Set this if the backend might gzip objects. + type: string + noCheckBucket: + default: false + description: If set, don't attempt to check the bucket exists or create it. + type: boolean + noHead: + default: false + description: If set, don't HEAD uploaded objects to check integrity. + type: boolean + noHeadObject: + default: false + description: If set, do not do HEAD before GET when getting objects. + type: boolean + noSystemMetadata: + default: false + description: Suppress setting and reading of system metadata + type: boolean + profile: + description: Profile to use in the shared credentials file. + type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. + type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string + secretAccessKey: + description: AWS Secret Access Key (password). + type: string + sessionToken: + description: An AWS session token. + type: string + sharedCredentialsFile: + description: Path to the shared credentials file. 
+ type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string + storageClass: + description: The storage class to use when storing new objects in S3. + example: ARCHIVE + type: string + uploadConcurrency: + default: 4 + description: Concurrency for multipart uploads and copies. + type: integer + uploadCutoff: + default: 200Mi + description: Cutoff for switching to chunked upload. + type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean + useMultipartEtag: + default: unset + description: Whether to use ETag in multipart uploads for verification + type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string + usePresignedRequest: + default: false + description: Whether to use a presigned request or PutObject for single part + uploads + type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string + v2Auth: + default: false + description: If true use v2 authentication. + type: boolean + versionAt: + default: "off" + description: Show file versions as they were at the specified time. 
+ type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean + versions: + default: false + description: Include old versions in directory listings. + type: boolean + type: object + storage.s3WasabiConfig: + properties: + accessKeyId: + description: AWS Access Key ID. + type: string + acl: + description: Canned ACL used when creating buckets and storing or copying + objects. + type: string + bucketAcl: + description: Canned ACL used when creating buckets. + example: private + type: string + chunkSize: + default: 5Mi + description: Chunk size to use for uploading. + type: string + copyCutoff: + default: 4.656Gi + description: Cutoff for switching to multipart copy. + type: string + decompress: + default: false + description: If set this will decompress gzip encoded objects. + type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean + disableChecksum: + default: false + description: Don't store MD5 checksum with object metadata. + type: boolean + disableHttp2: + default: false + description: Disable usage of http2 for S3 backends. + type: boolean + downloadUrl: + description: Custom endpoint for downloads. + type: string + encoding: + default: Slash,InvalidUtf8,Dot + description: The encoding for the backend. + type: string + endpoint: + description: Endpoint for S3 API. + example: s3.wasabisys.com + type: string + envAuth: + default: false + description: Get AWS credentials from runtime (environment variables or EC2/ECS + meta data if no env vars). + example: false + type: boolean + forcePathStyle: + default: true + description: If true use path style access if false use virtual hosted style. + type: boolean + listChunk: + default: 1000 + description: Size of listing chunk (response list for each ListObject S3 request). 
+ type: integer + listUrlEncode: + default: unset + description: 'Whether to url encode listings: true/false/unset' + type: string + listVersion: + default: 0 + description: 'Version of ListObjects to use: 1,2 or 0 for auto.' + type: integer + locationConstraint: + description: Location constraint - must be set to match the Region. + type: string + maxUploadParts: + default: 10000 + description: Maximum number of parts in a multipart upload. + type: integer + memoryPoolFlushTime: + default: 1m0s + description: How often internal memory buffer pools will be flushed. (no longer + used) + type: string + memoryPoolUseMmap: + default: false + description: Whether to use mmap buffers in internal memory pool. (no longer + used) + type: boolean + mightGzip: + default: unset + description: Set this if the backend might gzip objects. + type: string + noCheckBucket: + default: false + description: If set, don't attempt to check the bucket exists or create it. + type: boolean + noHead: + default: false + description: If set, don't HEAD uploaded objects to check integrity. + type: boolean + noHeadObject: + default: false + description: If set, do not do HEAD before GET when getting objects. + type: boolean + noSystemMetadata: + default: false + description: Suppress setting and reading of system metadata + type: boolean + profile: + description: Profile to use in the shared credentials file. + type: string + region: + description: Region to connect to. + type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. + type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string + secretAccessKey: + description: AWS Secret Access Key (password). 
+ type: string + sessionToken: + description: An AWS session token. + type: string + sharedCredentialsFile: + description: Path to the shared credentials file. + type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string + uploadConcurrency: + default: 4 + description: Concurrency for multipart uploads and copies. + type: integer + uploadCutoff: + default: 200Mi + description: Cutoff for switching to chunked upload. + type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean + useMultipartEtag: + default: unset + description: Whether to use ETag in multipart uploads for verification + type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string + usePresignedRequest: + default: false + description: Whether to use a presigned request or PutObject for single part + uploads + type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string + v2Auth: + default: false + description: If true use v2 authentication. + type: boolean + versionAt: + default: "off" + description: Show file versions as they were at the specified time. 
+ type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean + versions: + default: false + description: Include old versions in directory listings. + type: boolean + type: object + storage.s3ZataConfig: + properties: + accessKeyId: + description: AWS Access Key ID. + type: string + acl: + description: Canned ACL used when creating buckets and storing or copying + objects. + type: string + bucketAcl: + description: Canned ACL used when creating buckets. + example: private + type: string + chunkSize: + default: 5Mi + description: Chunk size to use for uploading. + type: string + copyCutoff: + default: 4.656Gi + description: Cutoff for switching to multipart copy. + type: string + decompress: + default: false + description: If set this will decompress gzip encoded objects. + type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean + disableChecksum: + default: false + description: Don't store MD5 checksum with object metadata. + type: boolean + disableHttp2: + default: false + description: Disable usage of http2 for S3 backends. + type: boolean + downloadUrl: + description: Custom endpoint for downloads. + type: string + encoding: + default: Slash,InvalidUtf8,Dot + description: The encoding for the backend. + type: string + endpoint: + description: Endpoint for S3 API. + example: idr01.zata.ai + type: string + envAuth: + default: false + description: Get AWS credentials from runtime (environment variables or EC2/ECS + meta data if no env vars). + example: false + type: boolean + forcePathStyle: + default: true + description: If true use path style access if false use virtual hosted style. + type: boolean + listChunk: + default: 1000 + description: Size of listing chunk (response list for each ListObject S3 request). 
+ type: integer + listUrlEncode: + default: unset + description: 'Whether to url encode listings: true/false/unset' + type: string + listVersion: + default: 0 + description: 'Version of ListObjects to use: 1,2 or 0 for auto.' + type: integer + locationConstraint: + description: Location constraint - must be set to match the Region. + type: string + maxUploadParts: + default: 10000 + description: Maximum number of parts in a multipart upload. + type: integer + memoryPoolFlushTime: + default: 1m0s + description: How often internal memory buffer pools will be flushed. (no longer + used) + type: string + memoryPoolUseMmap: + default: false + description: Whether to use mmap buffers in internal memory pool. (no longer + used) + type: boolean + mightGzip: + default: unset + description: Set this if the backend might gzip objects. + type: string + noCheckBucket: + default: false + description: If set, don't attempt to check the bucket exists or create it. + type: boolean + noHead: + default: false + description: If set, don't HEAD uploaded objects to check integrity. + type: boolean + noHeadObject: + default: false + description: If set, do not do HEAD before GET when getting objects. + type: boolean + noSystemMetadata: + default: false + description: Suppress setting and reading of system metadata + type: boolean + profile: + description: Profile to use in the shared credentials file. + type: string + region: + description: Region to connect to. + example: us-east-1 + type: string + roleArn: + description: ARN of the IAM role to assume. + type: string + roleExternalId: + description: External ID for assumed role. + type: string + roleSessionDuration: + description: Session duration for assumed role. + type: string + roleSessionName: + description: Session name for assumed role. + type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string + secretAccessKey: + description: AWS Secret Access Key (password). 
+ type: string + sessionToken: + description: An AWS session token. + type: string + sharedCredentialsFile: + description: Path to the shared credentials file. + type: string + signAcceptEncoding: + default: unset + description: Set if rclone should include Accept-Encoding as part of the signature. + type: string + uploadConcurrency: + default: 4 + description: Concurrency for multipart uploads and copies. + type: integer + uploadCutoff: + default: 200Mi + description: Cutoff for switching to chunked upload. + type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useArnRegion: + default: false + description: If true, enables arn region support for the service. + type: boolean + useDataIntegrityProtections: + default: unset + description: If true use AWS S3 data integrity protections. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean + useMultipartEtag: + default: unset + description: Whether to use ETag in multipart uploads for verification + type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string + usePresignedRequest: + default: false + description: Whether to use a presigned request or PutObject for single part + uploads + type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string + useXId: + default: unset + description: Set if rclone should add x-id URL parameters. + type: string + v2Auth: + default: false + description: If true use v2 authentication. + type: boolean + versionAt: + default: "off" + description: Show file versions as they were at the specified time. 
+ type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean + versions: + default: false + description: Include old versions in directory listings. + type: boolean + type: object + storage.seafileConfig: + properties: + 2fa: + default: false + description: Two-factor authentication ('true' if the account has 2FA enabled). + type: boolean + authToken: + description: Authentication token. + type: string + createLibrary: + default: false + description: Should rclone create a library if it doesn't exist. + type: boolean + description: + description: Description of the remote. + type: string + encoding: + default: Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8,Dot + description: The encoding for the backend. + type: string + library: + description: Name of the library. + type: string + libraryKey: + description: Library password (for encrypted libraries only). + type: string + pass: + description: Password. + type: string + url: + description: URL of seafile host to connect to. + example: https://cloud.seafile.com/ + type: string + user: + description: User name (usually email address). + type: string + type: object + storage.sftpConfig: + properties: + askPassword: + default: false + description: Allow asking for SFTP password when needed. + type: boolean + blake3sumCommand: + description: The command used to read BLAKE3 hashes. + type: string + chunkSize: + default: 32Ki + description: Upload and download chunk size. + type: string + ciphers: + description: Space separated list of ciphers to be used for session encryption, + ordered by preference. + type: string + concurrency: + default: 64 + description: The maximum number of outstanding requests for one file + type: integer + connections: + default: 0 + description: Maximum number of SFTP simultaneous connections, 0 for unlimited. + type: integer + copyIsHardlink: + default: false + description: Set to enable server side copies using hardlinks. 
+ type: boolean + crc32sumCommand: + description: The command used to read CRC-32 hashes. + type: string + description: + description: Description of the remote. + type: string + disableConcurrentReads: + default: false + description: If set don't use concurrent reads. + type: boolean + disableConcurrentWrites: + default: false + description: If set don't use concurrent writes. + type: boolean + disableHashcheck: + default: false + description: Disable the execution of SSH commands to determine if remote + file hashing is available. + type: boolean + hashes: + description: Comma separated list of supported checksum types. + type: string + host: + description: SSH host to connect to. + type: string + hostKeyAlgorithms: + description: Space separated list of host key algorithms, ordered by preference. + type: string + httpProxy: + description: URL for HTTP CONNECT proxy + type: string + idleTimeout: + default: 1m0s + description: Max time before closing idle connections. + type: string + keyExchange: + description: Space separated list of key exchange algorithms, ordered by preference. + type: string + keyFile: + description: Path to PEM-encoded private key file. + type: string + keyFilePass: + description: The passphrase to decrypt the PEM-encoded private key file. + type: string + keyPem: + description: Raw PEM-encoded private key. + type: string + keyUseAgent: + default: false + description: When set forces the usage of the ssh-agent. + type: boolean + knownHostsFile: + description: Optional path to known_hosts file. + example: ~/.ssh/known_hosts + type: string + macs: + description: Space separated list of MACs (message authentication code) algorithms, + ordered by preference. + type: string + md5sumCommand: + description: The command used to read MD5 hashes. + type: string + pass: + description: SSH password, leave blank to use ssh-agent. + type: string + pathOverride: + description: Override path used by SSH shell commands. 
+ type: string + port: + default: 22 + description: SSH port number. + type: integer + pubkey: + description: SSH public certificate for public certificate based authentication. + type: string + pubkeyFile: + description: Optional path to public key file. + type: string + serverCommand: + description: Specifies the path or command to run a sftp server on the remote + host. + type: string + setEnv: + description: Environment variables to pass to sftp and commands + type: string + setModtime: + default: true + description: Set the modified time on the remote if set. + type: boolean + sha1sumCommand: + description: The command used to read SHA-1 hashes. + type: string + sha256sumCommand: + description: The command used to read SHA-256 hashes. + type: string + shellType: + description: The type of SSH shell on remote server, if any. + example: none + type: string + skipLinks: + default: false + description: Set to skip any symlinks and any other non regular files. + type: boolean + socksProxy: + description: Socks 5 proxy host. + type: string + ssh: + description: Path and arguments to external ssh binary. + type: string + subsystem: + default: sftp + description: Specifies the SSH2 subsystem on the remote host. + type: string + useFstat: + default: false + description: If set use fstat instead of stat. + type: boolean + useInsecureCipher: + default: false + description: Enable the use of insecure ciphers and key exchange methods. + example: false + type: boolean + user: + default: $USER + description: SSH username. + type: string + xxh3sumCommand: + description: The command used to read XXH3 hashes. + type: string + xxh128sumCommand: + description: The command used to read XXH128 hashes. + type: string + type: object + storage.sharefileConfig: + properties: + authUrl: + description: Auth server URL. + type: string + chunkSize: + default: 64Mi + description: Upload chunk size. 
+ type: string + clientCredentials: + default: false + description: Use client credentials OAuth flow. + type: boolean + clientId: + description: OAuth Client Id. + type: string + clientSecret: + description: OAuth Client Secret. + type: string + description: + description: Description of the remote. + type: string + encoding: + default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot + description: The encoding for the backend. + type: string + endpoint: + description: Endpoint for API calls. + type: string + rootFolderId: + description: ID of the root folder. + example: "" + type: string + token: + description: OAuth Access Token as a JSON blob. + type: string + tokenUrl: + description: Token server url. + type: string + uploadCutoff: + default: 128Mi + description: Cutoff for switching to multipart upload. + type: string + type: object + storage.siaConfig: + properties: + apiPassword: + description: Sia Daemon API Password. + type: string + apiUrl: + default: http://127.0.0.1:9980 + description: Sia daemon API URL, like http://sia.daemon.host:9980. + type: string + description: + description: Description of the remote. + type: string + encoding: + default: Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot + description: The encoding for the backend. + type: string + userAgent: + default: Sia-Agent + description: Siad User Agent + type: string + type: object + storage.smbConfig: + properties: + caseInsensitive: + default: true + description: Whether the server is configured to be case-insensitive. + type: boolean + description: + description: Description of the remote. + type: string + domain: + default: WORKGROUP + description: Domain name for NTLM authentication. + type: string + encoding: + default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot + description: The encoding for the backend. 
+ type: string + hideSpecialShare: + default: true + description: Hide special shares (e.g. print$) which users aren't supposed + to access. + type: boolean + host: + description: SMB server hostname to connect to. + type: string + idleTimeout: + default: 1m0s + description: Max time before closing idle connections. + type: string + kerberosCcache: + description: Path to the Kerberos credential cache (krb5cc). + type: string + pass: + description: SMB password. + type: string + port: + default: 445 + description: SMB port number. + type: integer + spn: + description: Service principal name. + type: string + useKerberos: + default: false + description: Use Kerberos authentication. + type: boolean + user: + default: $USER + description: SMB username. + type: string + type: object + storage.storjExistingConfig: + properties: + accessGrant: + description: Access grant. + type: string + description: + description: Description of the remote. + type: string + type: object + storage.storjNewConfig: + properties: + apiKey: + description: API key. + type: string + description: + description: Description of the remote. + type: string + passphrase: + description: Encryption passphrase. + type: string + satelliteAddress: + default: us1.storj.io + description: Satellite address. + example: us1.storj.io + type: string + type: object + storage.sugarsyncConfig: + properties: + accessKeyId: + description: Sugarsync Access Key ID. + type: string + appId: + description: Sugarsync App ID. + type: string + authorization: + description: Sugarsync authorization. + type: string + authorizationExpiry: + description: Sugarsync authorization expiry. + type: string + deletedId: + description: Sugarsync deleted folder id. + type: string + description: + description: Description of the remote. + type: string + encoding: + default: Slash,Ctl,InvalidUtf8,Dot + description: The encoding for the backend. 
+ type: string + hardDelete: + default: false + description: Permanently delete files if true + type: boolean + privateAccessKey: + description: Sugarsync Private Access Key. + type: string + refreshToken: + description: Sugarsync refresh token. + type: string + rootId: + description: Sugarsync root id. + type: string + user: + description: Sugarsync user. + type: string + type: object + storage.swiftConfig: + properties: + applicationCredentialId: + description: Application Credential ID (OS_APPLICATION_CREDENTIAL_ID). + type: string + applicationCredentialName: + description: Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME). + type: string + applicationCredentialSecret: + description: Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET). + type: string + auth: + description: Authentication URL for server (OS_AUTH_URL). + example: https://auth.api.rackspacecloud.com/v1.0 + type: string + authToken: + description: Auth Token from alternate authentication - optional (OS_AUTH_TOKEN). + type: string + authVersion: + default: 0 + description: AuthVersion - optional - set to (1,2,3) if your auth URL has + no version (ST_AUTH_VERSION). + type: integer + chunkSize: + default: 5Gi + description: Above this size files will be chunked. + type: string + description: + description: Description of the remote. + type: string + domain: + description: User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME) + type: string + encoding: + default: Slash,InvalidUtf8 + description: The encoding for the backend. + type: string + endpointType: + default: public + description: Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE). + example: public + type: string + envAuth: + default: false + description: Get swift credentials from environment variables in standard + OpenStack form. + example: false + type: boolean + fetchUntilEmptyPage: + default: false + description: When paginating, always fetch unless we received an empty page. 
+ type: boolean + key: + description: API key or password (OS_PASSWORD). + type: string + leavePartsOnError: + default: false + description: If true avoid calling abort upload on a failure. + type: boolean + noChunk: + default: false + description: Don't chunk files during streaming upload. + type: boolean + noLargeObjects: + default: false + description: Disable support for static and dynamic large objects + type: boolean + partialPageFetchThreshold: + default: 0 + description: When paginating, fetch if the current page is within this percentage + of the limit. + type: integer + region: + description: Region name - optional (OS_REGION_NAME). + type: string + storagePolicy: + description: The storage policy to use when creating a new container. + example: "" + type: string + storageUrl: + description: Storage URL - optional (OS_STORAGE_URL). + type: string + tenant: + description: Tenant name - optional for v1 auth, this or tenant_id required + otherwise (OS_TENANT_NAME or OS_PROJECT_NAME). + type: string + tenantDomain: + description: Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME). + type: string + tenantId: + description: Tenant ID - optional for v1 auth, this or tenant required otherwise + (OS_TENANT_ID). + type: string + useSegmentsContainer: + default: unset + description: Choose destination for large object segments + type: string + user: + description: User name to log in (OS_USERNAME). + type: string + userId: + description: User ID to log in - optional - most swift systems use user and + leave this blank (v3 auth) (OS_USER_ID). + type: string + type: object + storage.unionConfig: + properties: + actionPolicy: + default: epall + description: Policy to choose upstream on ACTION category. + type: string + cacheTime: + default: 120 + description: Cache time of usage and free space (in seconds). + type: integer + createPolicy: + default: epmfs + description: Policy to choose upstream on CREATE category. 
+ type: string + description: + description: Description of the remote. + type: string + minFreeSpace: + default: 1Gi + description: Minimum viable free space for lfs/eplfs policies. + type: string + searchPolicy: + default: ff + description: Policy to choose upstream on SEARCH category. + type: string + upstreams: + description: List of space separated upstreams. + type: string + type: object + storage.webdavConfig: + properties: + authRedirect: + default: false + description: Preserve authentication on redirect. + type: boolean + bearerToken: + description: Bearer token instead of user/pass (e.g. a Macaroon). + type: string + bearerTokenCommand: + description: Command to run to get a bearer token. + type: string + description: + description: Description of the remote. + type: string + encoding: + description: The encoding for the backend. + type: string + headers: + description: Set HTTP headers for all transactions. + type: string + nextcloudChunkSize: + default: 10Mi + description: Nextcloud upload chunk size. + type: string + owncloudExcludeMounts: + default: false + description: Exclude ownCloud mounted storages + type: boolean + owncloudExcludeShares: + default: false + description: Exclude ownCloud shares + type: boolean + pacerMinSleep: + default: 10ms + description: Minimum time to sleep between API calls. + type: string + pass: + description: Password. + type: string + unixSocket: + description: Path to a unix domain socket to dial to, instead of opening a + TCP connection directly + type: string + url: + description: URL of http host to connect to. + type: string + user: + description: User name. + type: string + vendor: + description: Name of the WebDAV site/service/software you are using. + example: fastmail + type: string + type: object + storage.yandexConfig: + properties: + authUrl: + description: Auth server URL. + type: string + clientCredentials: + default: false + description: Use client credentials OAuth flow. 
+ type: boolean + clientId: + description: OAuth Client Id. + type: string + clientSecret: + description: OAuth Client Secret. + type: string + description: + description: Description of the remote. + type: string + encoding: + default: Slash,Del,Ctl,InvalidUtf8,Dot + description: The encoding for the backend. + type: string + hardDelete: + default: false + description: Delete files permanently rather than putting them into the trash. + type: boolean + spoofUa: + default: true + description: Set the user agent to match an official version of the yandex + disk client. May help with upload performance. + type: boolean + token: + description: OAuth Access Token as a JSON blob. + type: string + tokenUrl: + description: Token server url. + type: string + type: object + storage.zohoConfig: + properties: + authUrl: + description: Auth server URL. + type: string + clientCredentials: + default: false + description: Use client credentials OAuth flow. + type: boolean + clientId: + description: OAuth Client Id. + type: string + clientSecret: + description: OAuth Client Secret. + type: string + description: + description: Description of the remote. + type: string + encoding: + default: Del,Ctl,InvalidUtf8 + description: The encoding for the backend. + type: string + region: + description: Zoho region to connect to. + example: com + type: string + token: + description: OAuth Access Token as a JSON blob. + type: string + tokenUrl: + description: Token server url. + type: string + uploadCutoff: + default: 10Mi + description: Cutoff for switching to large file upload api (>= 10 MiB). 
+ type: string + type: object + store.PieceReader: + type: object + wallet.ImportKeystoreRequest: + properties: + name: + description: optional human-readable name + type: string + privateKey: + description: lotus wallet export format + type: string + type: object +externalDocs: + description: OpenAPI + url: https://swagger.io/resources/open-api/ +info: + contact: + name: Singularity Team + url: https://github.com/data-preservation-programs/singularity/issues + description: This is the API for Singularity, a tool for large-scale clients with + PB-scale data onboarding to Filecoin network. + license: + name: MIT + Apache 2.0 + url: https://github.com/data-preservation-programs/singularity/blob/main/LICENSE + title: Singularity API + version: beta +paths: + /deal: + post: + consumes: + - application/json + description: List all deals + operationId: ListDeals + parameters: + - description: ListDealRequest + in: body + name: request + required: true + schema: + $ref: '#/definitions/deal.ListDealRequest' + produces: + - application/json + responses: + "200": + description: OK + schema: + items: + $ref: '#/definitions/model.Deal' + type: array + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.HTTPError' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.HTTPError' + summary: List all deals + tags: + - Deal + /file/{id}: + get: + consumes: + - application/json + operationId: GetFile + parameters: + - description: File ID + in: path + name: id + required: true + type: integer + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/model.File' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.HTTPError' + summary: Get details about a file + tags: + - File + /file/{id}/deals: + get: + consumes: + - application/json + operationId: GetFileDeals + parameters: + - description: File ID + in: path + name: id + required: true + type: integer + 
produces: + - application/json + responses: + "200": + description: OK + schema: + items: + $ref: '#/definitions/file.DealsForFileRange' + type: array + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.HTTPError' + summary: Get all deals that have been made for a file + tags: + - File + /file/{id}/prepare_to_pack: + post: + consumes: + - application/json + operationId: PrepareToPackFile + parameters: + - description: File ID + in: path + name: id + required: true + type: integer + produces: + - application/json + responses: + "200": + description: OK + schema: + type: integer + "400": + description: Bad Request + schema: + type: string + "500": + description: Internal Server Error + schema: + type: string + summary: prepare job for a given item + tags: + - File + /file/{id}/retrieve: + get: + consumes: + - application/json + operationId: RetrieveFile + parameters: + - description: File ID + in: path + name: id + required: true + type: integer + - description: HTTP Range Header + in: header + name: Range + type: string + produces: + - application/octet-stream + responses: + "200": + description: OK + schema: + type: file + "206": + description: Partial Content + schema: + type: file + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.HTTPError' + "404": + description: Not Found + schema: + $ref: '#/definitions/api.HTTPError' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.HTTPError' + summary: Get content of a file + tags: + - File + /identity: + post: + consumes: + - application/json + operationId: SetIdentity + parameters: + - description: Create Request + in: body + name: request + required: true + schema: + $ref: '#/definitions/admin.SetIdentityRequest' + produces: + - application/json + responses: + "204": + description: No Content + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.HTTPError' + "500": + description: Internal Server Error + schema: + 
$ref: '#/definitions/api.HTTPError' + summary: Set the user identity for tracking purpose + tags: + - Admin + /job/{id}/pack: + post: + consumes: + - application/json + operationId: Pack + parameters: + - description: Pack job ID + in: path + name: id + required: true + type: integer + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/model.Car' + "400": + description: Bad Request + schema: + type: string + "500": + description: Internal Server Error + schema: + type: string + summary: Pack a pack job into car files + tags: + - Job + /piece/{id}/metadata: + get: + description: Get metadata for a piece for how it may be reassembled from the + data source + parameters: + - description: Piece CID + in: path + name: id + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/store.PieceReader' + "400": + description: Bad Request + schema: + type: string + "404": + description: Not Found + schema: + type: string + "500": + description: Internal Server Error + schema: + type: string + summary: Get metadata for a piece + tags: + - Piece + /preparation: + get: + consumes: + - application/json + operationId: ListPreparations + produces: + - application/json + responses: + "200": + description: OK + schema: + items: + $ref: '#/definitions/model.Preparation' + type: array + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.HTTPError' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.HTTPError' + summary: List all preparations + tags: + - Preparation + post: + consumes: + - application/json + operationId: CreatePreparation + parameters: + - description: Create Request + in: body + name: request + required: true + schema: + $ref: '#/definitions/dataprep.CreateRequest' + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/model.Preparation' + 
"400": + description: Bad Request + schema: + $ref: '#/definitions/api.HTTPError' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.HTTPError' + summary: Create a new preparation + tags: + - Preparation + /preparation/{id}: + get: + operationId: GetPreparationStatus + parameters: + - description: Preparation ID or name + in: path + name: id + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + items: + $ref: '#/definitions/job.SourceStatus' + type: array + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.HTTPError' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.HTTPError' + summary: Get the status of a preparation + tags: + - Preparation + /preparation/{id}/output/{name}: + delete: + consumes: + - application/json + operationId: RemoveOutputStorage + parameters: + - description: Preparation ID or name + in: path + name: id + required: true type: string - token: - description: OAuth Access Token as a JSON blob. + - description: Output storage ID or name + in: path + name: name + required: true type: string - tokenUrl: - description: Token server url. 
+ produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/model.Preparation' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.HTTPError' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.HTTPError' + summary: Detach an output storage from a preparation + tags: + - Preparation + post: + consumes: + - application/json + operationId: AddOutputStorage + parameters: + - description: Preparation ID or name + in: path + name: id + required: true type: string - type: object - store.PieceReader: - type: object - wallet.ImportKeystoreRequest: - properties: - name: - description: optional human-readable name + - description: Output storage ID or name + in: path + name: name + required: true type: string - privateKey: - description: lotus wallet export format + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/model.Preparation' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.HTTPError' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.HTTPError' + summary: Attach an output storage with a preparation + tags: + - Preparation + /preparation/{id}/piece: + get: + consumes: + - application/json + operationId: ListPieces + parameters: + - description: Preparation ID or name + in: path + name: id + required: true type: string - type: object -externalDocs: - description: OpenAPI - url: https://swagger.io/resources/open-api/ -info: - contact: - name: Singularity Team - url: https://github.com/data-preservation-programs/singularity/issues - description: This is the API for Singularity, a tool for large-scale clients with - PB-scale data onboarding to Filecoin network. 
- license: - name: MIT + Apache 2.0 - url: https://github.com/data-preservation-programs/singularity/blob/main/LICENSE - title: Singularity API - version: beta -paths: - /deal: + produces: + - application/json + responses: + "200": + description: OK + schema: + items: + $ref: '#/definitions/dataprep.PieceList' + type: array + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.HTTPError' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.HTTPError' + summary: List all prepared pieces for a preparation + tags: + - Piece post: consumes: - application/json - description: List all deals - operationId: ListDeals + operationId: AddPiece parameters: - - description: ListDealRequest + - description: Preparation ID or name + in: path + name: id + required: true + type: string + - description: Piece information in: body name: request required: true schema: - $ref: '#/definitions/deal.ListDealRequest' + $ref: '#/definitions/dataprep.AddPieceRequest' produces: - application/json responses: "200": description: OK schema: - items: - $ref: '#/definitions/model.Deal' - type: array + $ref: '#/definitions/model.Car' "400": description: Bad Request schema: @@ -11178,143 +15977,216 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: List all deals + summary: Add a piece to a preparation tags: - - Deal - /file/{id}: + - Piece + /preparation/{id}/piece/{piece_cid}: + delete: + consumes: + - application/json + description: |- + Deletes a piece (CAR) and its associated records. For data pieces, resets file ranges + to allow re-packing. For DAG pieces, resets directory export flags for re-generation. 
+ operationId: DeletePiece + parameters: + - description: Preparation ID or name + in: path + name: id + required: true + type: string + - description: Piece CID + in: path + name: piece_cid + required: true + type: string + - description: Delete options + in: body + name: request + required: true + schema: + $ref: '#/definitions/dataprep.DeletePieceRequest' + produces: + - application/json + responses: + "204": + description: No Content + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.HTTPError' + "404": + description: Not Found + schema: + $ref: '#/definitions/api.HTTPError' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.HTTPError' + summary: Delete a piece from a preparation + tags: + - Piece + /preparation/{id}/schedules: get: consumes: - application/json - operationId: GetFile + operationId: ListPreparationSchedules parameters: - - description: File ID + - description: Preparation ID or name in: path name: id required: true - type: integer + type: string produces: - application/json responses: "200": description: OK schema: - $ref: '#/definitions/model.File' + items: + $ref: '#/definitions/model.Schedule' + type: array + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.HTTPError' "500": description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Get details about a file + summary: List all schedules for a preparation tags: - - File - /file/{id}/deals: - get: + - Deal Schedule + /preparation/{id}/source/{name}: + post: consumes: - application/json - operationId: GetFileDeals + operationId: AddSourceStorage parameters: - - description: File ID + - description: Preparation ID or name in: path name: id required: true - type: integer + type: string + - description: Source storage ID or name + in: path + name: name + required: true + type: string produces: - application/json responses: "200": description: OK schema: - items: - $ref: 
'#/definitions/file.DealsForFileRange' - type: array + $ref: '#/definitions/model.Preparation' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.HTTPError' "500": description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Get all deals that have been made for a file + summary: Attach a source storage with a preparation tags: - - File - /file/{id}/prepare_to_pack: - post: + - Preparation + /preparation/{id}/source/{name}/explore/{path}: + get: consumes: - application/json - operationId: PrepareToPackFile + operationId: ExplorePreparation parameters: - - description: File ID + - description: Preparation ID or name in: path name: id required: true - type: integer + type: string + - description: Source storage ID or name + in: path + name: name + required: true + type: string + - description: Directory path + in: path + name: path + required: true + type: string produces: - application/json responses: "200": description: OK schema: - type: integer + $ref: '#/definitions/dataprep.ExploreResult' "400": description: Bad Request schema: - type: string + $ref: '#/definitions/api.HTTPError' "500": description: Internal Server Error schema: - type: string - summary: prepare job for a given item + $ref: '#/definitions/api.HTTPError' + summary: Explore a directory in a prepared source storage tags: - - File - /file/{id}/retrieve: - get: + - Preparation + /preparation/{id}/source/{name}/file: + post: consumes: - application/json - operationId: RetrieveFile + description: Tells Singularity that something is ready to be grabbed for data + preparation + operationId: PushFile parameters: - - description: File ID + - description: Preparation ID or name in: path name: id required: true - type: integer - - description: HTTP Range Header - in: header - name: Range type: string + - description: Source storage ID or name + in: path + name: name + required: true + type: string + - description: File Info + in: body + name: file + 
required: true + schema: + $ref: '#/definitions/file.Info' produces: - - application/octet-stream + - application/json responses: "200": description: OK schema: - type: file - "206": - description: Partial Content - schema: - type: file + $ref: '#/definitions/model.File' "400": description: Bad Request schema: $ref: '#/definitions/api.HTTPError' - "404": - description: Not Found - schema: - $ref: '#/definitions/api.HTTPError' "500": description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Get content of a file + summary: Push a file to be queued tags: - File - /identity: + /preparation/{id}/source/{name}/finalize: post: consumes: - application/json - operationId: SetIdentity + operationId: PrepareToPackSource parameters: - - description: Create Request - in: body - name: request + - description: Preparation ID or name + in: path + name: id required: true - schema: - $ref: '#/definitions/admin.SetIdentityRequest' + type: string + - description: Storage ID or name + in: path + name: name + required: true + type: string produces: - application/json responses: @@ -11323,89 +16195,112 @@ paths: "400": description: Bad Request schema: - $ref: '#/definitions/api.HTTPError' + type: string "500": description: Internal Server Error schema: - $ref: '#/definitions/api.HTTPError' - summary: Set the user identity for tracking purpose + type: string + summary: prepare to pack a data source tags: - - Admin - /job/{id}/pack: + - Job + /preparation/{id}/source/{name}/pause-daggen: post: consumes: - application/json - operationId: Pack + operationId: PauseDagGen parameters: - - description: Pack job ID + - description: Preparation ID or name in: path name: id required: true - type: integer + type: string + - description: Storage ID or name + in: path + name: name + required: true + type: string produces: - application/json responses: "200": description: OK schema: - $ref: '#/definitions/model.Car' + $ref: '#/definitions/model.Job' "400": description: 
Bad Request schema: - type: string + $ref: '#/definitions/api.HTTPError' "500": description: Internal Server Error schema: - type: string - summary: Pack a pack job into car files + $ref: '#/definitions/api.HTTPError' + summary: Pause an ongoing DAG generation job tags: - Job - /piece/{id}/metadata: - get: - description: Get metadata for a piece for how it may be reassembled from the - data source + /preparation/{id}/source/{name}/pause-pack/{job_id}: + post: + consumes: + - application/json + operationId: PausePack parameters: - - description: Piece CID + - description: Preparation ID or name in: path name: id required: true type: string + - description: Storage ID or name + in: path + name: name + required: true + type: string + - description: Pack Job ID + in: path + name: job_id + required: true + type: integer produces: - application/json responses: "200": description: OK schema: - $ref: '#/definitions/store.PieceReader' + items: + $ref: '#/definitions/model.Job' + type: array "400": description: Bad Request schema: - type: string - "404": - description: Not Found - schema: - type: string + $ref: '#/definitions/api.HTTPError' "500": description: Internal Server Error schema: - type: string - summary: Get metadata for a piece + $ref: '#/definitions/api.HTTPError' + summary: Pause a specific packing job tags: - - Piece - /preparation: - get: + - Job + /preparation/{id}/source/{name}/pause-scan: + post: consumes: - application/json - operationId: ListPreparations + operationId: PauseScan + parameters: + - description: Preparation ID or name + in: path + name: id + required: true + type: string + - description: Storage ID or name + in: path + name: name + required: true + type: string produces: - application/json responses: "200": description: OK schema: - items: - $ref: '#/definitions/model.Preparation' - type: array + $ref: '#/definitions/model.Job' "400": description: Bad Request schema: @@ -11414,27 +16309,32 @@ paths: description: Internal Server Error 
schema: $ref: '#/definitions/api.HTTPError' - summary: List all preparations + summary: Pause an ongoing scanning job tags: - - Preparation + - Job + /preparation/{id}/source/{name}/start-daggen: post: consumes: - application/json - operationId: CreatePreparation + operationId: StartDagGen parameters: - - description: Create Request - in: body - name: request + - description: Preparation ID or name + in: path + name: id required: true - schema: - $ref: '#/definitions/dataprep.CreateRequest' + type: string + - description: Storage ID or name + in: path + name: name + required: true + type: string produces: - application/json responses: "200": description: OK schema: - $ref: '#/definitions/model.Preparation' + $ref: '#/definitions/model.Job' "400": description: Bad Request schema: @@ -11443,18 +16343,30 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create a new preparation + summary: Start a new DAG generation job tags: - - Preparation - /preparation/{id}: - get: - operationId: GetPreparationStatus + - Job + /preparation/{id}/source/{name}/start-pack/{job_id}: + post: + consumes: + - application/json + operationId: StartPack parameters: - description: Preparation ID or name in: path name: id required: true - type: string + type: string + - description: Storage ID or name + in: path + name: name + required: true + type: string + - description: Pack Job ID + in: path + name: job_id + required: true + type: integer produces: - application/json responses: @@ -11462,7 +16374,7 @@ paths: description: OK schema: items: - $ref: '#/definitions/job.SourceStatus' + $ref: '#/definitions/model.Job' type: array "400": description: Bad Request @@ -11472,21 +16384,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Get the status of a preparation + summary: Start or restart a specific packing job tags: - - Preparation - /preparation/{id}/output/{name}: - delete: + - Job + 
/preparation/{id}/source/{name}/start-scan: + post: consumes: - application/json - operationId: RemoveOutputStorage + operationId: StartScan parameters: - description: Preparation ID or name in: path name: id required: true type: string - - description: Output storage ID or name + - description: Storage ID or name in: path name: name required: true @@ -11497,7 +16409,7 @@ paths: "200": description: OK schema: - $ref: '#/definitions/model.Preparation' + $ref: '#/definitions/model.Job' "400": description: Bad Request schema: @@ -11506,31 +16418,29 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Detach an output storage from a preparation + summary: Start a new scanning job tags: - - Preparation - post: + - Job + /preparation/{id}/wallet: + get: consumes: - application/json - operationId: AddOutputStorage + operationId: ListAttachedWallets parameters: - description: Preparation ID or name in: path name: id required: true type: string - - description: Output storage ID or name - in: path - name: name - required: true - type: string produces: - application/json responses: "200": description: OK schema: - $ref: '#/definitions/model.Preparation' + items: + $ref: '#/definitions/model.Wallet' + type: array "400": description: Bad Request schema: @@ -11539,29 +16449,32 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Attach an output storage with a preparation + summary: List all wallets of a preparation. 
tags: - - Preparation - /preparation/{id}/piece: - get: + - Wallet Association + /preparation/{id}/wallet/{wallet}: + delete: consumes: - application/json - operationId: ListPieces + operationId: DetachWallet parameters: - description: Preparation ID or name in: path name: id required: true type: string + - description: Wallet Address + in: path + name: wallet + required: true + type: string produces: - application/json responses: "200": description: OK schema: - items: - $ref: '#/definitions/dataprep.PieceList' - type: array + $ref: '#/definitions/model.Preparation' "400": description: Bad Request schema: @@ -11570,32 +16483,31 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: List all prepared pieces for a preparation + summary: Detach a new wallet from a preparation tags: - - Piece + - Wallet Association post: consumes: - application/json - operationId: AddPiece + operationId: AttachWallet parameters: - description: Preparation ID or name in: path name: id required: true type: string - - description: Piece information - in: body - name: request + - description: Wallet Address + in: path + name: wallet required: true - schema: - $ref: '#/definitions/dataprep.AddPieceRequest' + type: string produces: - application/json responses: "200": description: OK schema: - $ref: '#/definitions/model.Car' + $ref: '#/definitions/model.Preparation' "400": description: Bad Request schema: @@ -11604,34 +16516,26 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Add a piece to a preparation + summary: Attach a new wallet with a preparation tags: - - Piece - /preparation/{id}/piece/{piece_cid}: + - Wallet Association + /preparation/{name}: delete: consumes: - application/json - description: |- - Deletes a piece (CAR) and its associated records. For data pieces, resets file ranges - to allow re-packing. For DAG pieces, resets directory export flags for re-generation. 
- operationId: DeletePiece + operationId: RemovePreparation parameters: - description: Preparation ID or name in: path - name: id - required: true - type: string - - description: Piece CID - in: path - name: piece_cid + name: name required: true type: string - - description: Delete options + - description: Remove Request in: body name: request required: true schema: - $ref: '#/definitions/dataprep.DeletePieceRequest' + $ref: '#/definitions/dataprep.RemoveRequest' produces: - application/json responses: @@ -11641,37 +16545,37 @@ paths: description: Bad Request schema: $ref: '#/definitions/api.HTTPError' - "404": - description: Not Found - schema: - $ref: '#/definitions/api.HTTPError' "500": description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Delete a piece from a preparation + summary: Remove a preparation tags: - - Piece - /preparation/{id}/schedules: - get: + - Preparation + /preparation/{name}/rename: + patch: consumes: - application/json - operationId: ListPreparationSchedules + operationId: RenamePreparation parameters: - description: Preparation ID or name in: path - name: id + name: name required: true type: string + - description: New preparation name + in: body + name: request + required: true + schema: + $ref: '#/definitions/dataprep.RenameRequest' produces: - application/json responses: "200": description: OK schema: - items: - $ref: '#/definitions/model.Schedule' - type: array + $ref: '#/definitions/model.Preparation' "400": description: Bad Request schema: @@ -11680,32 +16584,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: List all schedules for a preparation + summary: Rename a preparation tags: - - Deal Schedule - /preparation/{id}/source/{name}: - post: - consumes: - - application/json - operationId: AddSourceStorage - parameters: - - description: Preparation ID or name - in: path - name: id - required: true - type: string - - description: Source storage ID 
or name - in: path - name: name - required: true - type: string + - Preparation + /schedule: + get: + operationId: ListSchedules produces: - application/json responses: "200": description: OK schema: - $ref: '#/definitions/model.Preparation' + items: + $ref: '#/definitions/model.Schedule' + type: array "400": description: Bad Request schema: @@ -11714,37 +16607,28 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Attach a source storage with a preparation + summary: List all deal making schedules tags: - - Preparation - /preparation/{id}/source/{name}/explore/{path}: - get: + - Deal Schedule + post: consumes: - application/json - operationId: ExplorePreparation + description: Create a new schedule + operationId: CreateSchedule parameters: - - description: Preparation ID or name - in: path - name: id - required: true - type: string - - description: Source storage ID or name - in: path - name: name - required: true - type: string - - description: Directory path - in: path - name: path + - description: CreateRequest + in: body + name: schedule required: true - type: string + schema: + $ref: '#/definitions/schedule.CreateRequest' produces: - application/json responses: "200": description: OK schema: - $ref: '#/definitions/dataprep.ExploreResult' + $ref: '#/definitions/model.Schedule' "400": description: Bad Request schema: @@ -11753,40 +16637,23 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Explore a directory in a prepared source storage + summary: Create a new schedule tags: - - Preparation - /preparation/{id}/source/{name}/file: - post: - consumes: - - application/json - description: Tells Singularity that something is ready to be grabbed for data - preparation - operationId: PushFile + - Deal Schedule + /schedule/{id}: + delete: + operationId: RemoveSchedule parameters: - - description: Preparation ID or name + - description: Schedule ID in: path name: id 
required: true - type: string - - description: Source storage ID or name - in: path - name: name - required: true - type: string - - description: File Info - in: body - name: file - required: true - schema: - $ref: '#/definitions/file.Info' + type: integer produces: - application/json responses: - "200": - description: OK - schema: - $ref: '#/definitions/model.File' + "204": + description: No Content "400": description: Bad Request schema: @@ -11795,64 +16662,60 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Push a file to be queued + summary: Delete a specific schedule tags: - - File - /preparation/{id}/source/{name}/finalize: - post: + - Deal Schedule + patch: consumes: - application/json - operationId: PrepareToPackSource + description: Update a schedule + operationId: UpdateSchedule parameters: - - description: Preparation ID or name + - description: Schedule ID in: path name: id required: true - type: string - - description: Storage ID or name - in: path - name: name + type: integer + - description: Update request + in: body + name: body required: true - type: string + schema: + $ref: '#/definitions/schedule.UpdateRequest' produces: - application/json responses: - "204": - description: No Content + "200": + description: OK + schema: + $ref: '#/definitions/model.Schedule' "400": description: Bad Request schema: - type: string + $ref: '#/definitions/api.HTTPError' "500": description: Internal Server Error schema: - type: string - summary: prepare to pack a data source + $ref: '#/definitions/api.HTTPError' + summary: Update a schedule tags: - - Job - /preparation/{id}/source/{name}/pause-daggen: - post: - consumes: - - application/json - operationId: PauseDagGen + - Deal Schedule + /schedule/{id}/pause: + post: + operationId: PauseSchedule parameters: - - description: Preparation ID or name + - description: Schedule ID in: path name: id required: true - type: string - - description: Storage ID or name - in: 
path - name: name - required: true - type: string + type: integer produces: - application/json responses: "200": description: OK schema: - $ref: '#/definitions/model.Job' + $ref: '#/definitions/model.Schedule' "400": description: Bad Request schema: @@ -11861,29 +16724,17 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Pause an ongoing DAG generation job + summary: Pause a specific schedule tags: - - Job - /preparation/{id}/source/{name}/pause-pack/{job_id}: + - Deal Schedule + /schedule/{id}/resume: post: - consumes: - - application/json - operationId: PausePack + operationId: ResumeSchedule parameters: - - description: Preparation ID or name + - description: Schedule ID in: path name: id required: true - type: string - - description: Storage ID or name - in: path - name: name - required: true - type: string - - description: Pack Job ID - in: path - name: job_id - required: true type: integer produces: - application/json @@ -11891,9 +16742,7 @@ paths: "200": description: OK schema: - items: - $ref: '#/definitions/model.Job' - type: array + $ref: '#/definitions/model.Schedule' "400": description: Bad Request schema: @@ -11902,32 +16751,29 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Pause a specific packing job + summary: Resume a specific schedule tags: - - Job - /preparation/{id}/source/{name}/pause-scan: + - Deal Schedule + /send_deal: post: consumes: - application/json - operationId: PauseScan + description: Send a manual deal proposal + operationId: SendManual parameters: - - description: Preparation ID or name - in: path - name: id - required: true - type: string - - description: Storage ID or name - in: path - name: name + - description: Proposal + in: body + name: proposal required: true - type: string + schema: + $ref: '#/definitions/deal.Proposal' produces: - application/json responses: "200": description: OK schema: - $ref: '#/definitions/model.Job' 
+ $ref: '#/definitions/model.Deal' "400": description: Bad Request schema: @@ -11936,32 +16782,23 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Pause an ongoing scanning job + summary: Send a manual deal proposal tags: - - Job - /preparation/{id}/source/{name}/start-daggen: - post: + - Deal + /storage: + get: consumes: - application/json - operationId: StartDagGen - parameters: - - description: Preparation ID or name - in: path - name: id - required: true - type: string - - description: Storage ID or name - in: path - name: name - required: true - type: string + operationId: ListStorages produces: - application/json responses: "200": description: OK schema: - $ref: '#/definitions/model.Job' + items: + $ref: '#/definitions/model.Storage' + type: array "400": description: Bad Request schema: @@ -11970,39 +16807,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Start a new DAG generation job + summary: List all storages tags: - - Job - /preparation/{id}/source/{name}/start-pack/{job_id}: - post: - consumes: - - application/json - operationId: StartPack + - Storage + /storage/{name}: + delete: + operationId: RemoveStorage parameters: - - description: Preparation ID or name - in: path - name: id - required: true - type: string - description: Storage ID or name in: path name: name required: true type: string - - description: Pack Job ID - in: path - name: job_id - required: true - type: integer - produces: - - application/json responses: - "200": - description: OK - schema: - items: - $ref: '#/definitions/model.Job' - type: array + "204": + description: No Content "400": description: Bad Request schema: @@ -12011,32 +16830,34 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Start or restart a specific packing job + summary: Remove a storage tags: - - Job - /preparation/{id}/source/{name}/start-scan: - post: + - 
Storage + patch: consumes: - application/json - operationId: StartScan + operationId: UpdateStorage parameters: - - description: Preparation ID or name - in: path - name: id - required: true - type: string - description: Storage ID or name in: path name: name required: true type: string + - description: Configuration + in: body + name: config + required: true + schema: + additionalProperties: + type: string + type: object produces: - application/json responses: "200": description: OK schema: - $ref: '#/definitions/model.Job' + $ref: '#/definitions/model.Storage' "400": description: Bad Request schema: @@ -12045,18 +16866,23 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Start a new scanning job + summary: Update a storage connection tags: - - Job - /preparation/{id}/wallet: + - Storage + /storage/{name}/explore/{path}: get: consumes: - application/json - operationId: ListAttachedWallets + operationId: ExploreStorage parameters: - - description: Preparation ID or name + - description: Storage ID or name in: path - name: id + name: name + required: true + type: string + - description: Path in the storage system to explore + in: path + name: path required: true type: string produces: @@ -12066,7 +16892,7 @@ paths: description: OK schema: items: - $ref: '#/definitions/model.Wallet' + $ref: '#/definitions/storage.DirEntry' type: array "400": description: Bad Request @@ -12076,32 +16902,33 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: List all wallets of a preparation. 
+ summary: Explore directory entries in a storage system tags: - - Wallet Association - /preparation/{id}/wallet/{wallet}: - delete: + - Storage + /storage/{name}/rename: + patch: consumes: - application/json - operationId: DetachWallet + operationId: RenameStorage parameters: - - description: Preparation ID or name + - description: Storage ID or name in: path - name: id + name: name required: true type: string - - description: Wallet Address - in: path - name: wallet + - description: New storage name + in: body + name: request required: true - type: string + schema: + $ref: '#/definitions/storage.RenameRequest' produces: - application/json responses: "200": description: OK schema: - $ref: '#/definitions/model.Preparation' + $ref: '#/definitions/model.Storage' "400": description: Bad Request schema: @@ -12110,31 +16937,28 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Detach a new wallet from a preparation + summary: Rename a storage connection tags: - - Wallet Association + - Storage + /storage/azureblob: post: consumes: - application/json - operationId: AttachWallet + operationId: CreateAzureblobStorage parameters: - - description: Preparation ID or name - in: path - name: id - required: true - type: string - - description: Wallet Address - in: path - name: wallet + - description: Request body + in: body + name: request required: true - type: string + schema: + $ref: '#/definitions/storage.createAzureblobStorageRequest' produces: - application/json responses: "200": description: OK schema: - $ref: '#/definitions/model.Preparation' + $ref: '#/definitions/model.Storage' "400": description: Bad Request schema: @@ -12143,31 +16967,28 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Attach a new wallet with a preparation + summary: Create Azureblob storage tags: - - Wallet Association - /preparation/{name}: - delete: + - Storage + /storage/b2: + post: consumes: - 
application/json - operationId: RemovePreparation + operationId: CreateB2Storage parameters: - - description: Preparation ID or name - in: path - name: name - required: true - type: string - - description: Remove Request + - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/dataprep.RemoveRequest' + $ref: '#/definitions/storage.createB2StorageRequest' produces: - application/json responses: - "204": - description: No Content + "200": + description: OK + schema: + $ref: '#/definitions/model.Storage' "400": description: Bad Request schema: @@ -12176,33 +16997,28 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Remove a preparation + summary: Create B2 storage tags: - - Preparation - /preparation/{name}/rename: - patch: + - Storage + /storage/box: + post: consumes: - application/json - operationId: RenamePreparation + operationId: CreateBoxStorage parameters: - - description: Preparation ID or name - in: path - name: name - required: true - type: string - - description: New preparation name + - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/dataprep.RenameRequest' + $ref: '#/definitions/storage.createBoxStorageRequest' produces: - application/json responses: "200": description: OK schema: - $ref: '#/definitions/model.Preparation' + $ref: '#/definitions/model.Storage' "400": description: Bad Request schema: @@ -12211,21 +17027,28 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Rename a preparation + summary: Create Box storage tags: - - Preparation - /schedule: - get: - operationId: ListSchedules + - Storage + /storage/drive: + post: + consumes: + - application/json + operationId: CreateDriveStorage + parameters: + - description: Request body + in: body + name: request + required: true + schema: + $ref: '#/definitions/storage.createDriveStorageRequest' produces: - 
application/json responses: "200": description: OK schema: - items: - $ref: '#/definitions/model.Schedule' - type: array + $ref: '#/definitions/model.Storage' "400": description: Bad Request schema: @@ -12234,28 +17057,28 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: List all deal making schedules + summary: Create Drive storage tags: - - Deal Schedule + - Storage + /storage/dropbox: post: consumes: - application/json - description: Create a new schedule - operationId: CreateSchedule + operationId: CreateDropboxStorage parameters: - - description: CreateRequest + - description: Request body in: body - name: schedule + name: request required: true schema: - $ref: '#/definitions/schedule.CreateRequest' + $ref: '#/definitions/storage.createDropboxStorageRequest' produces: - application/json responses: "200": description: OK schema: - $ref: '#/definitions/model.Schedule' + $ref: '#/definitions/model.Storage' "400": description: Bad Request schema: @@ -12264,23 +17087,28 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create a new schedule + summary: Create Dropbox storage tags: - - Deal Schedule - /schedule/{id}: - delete: - operationId: RemoveSchedule + - Storage + /storage/fichier: + post: + consumes: + - application/json + operationId: CreateFichierStorage parameters: - - description: Schedule ID - in: path - name: id + - description: Request body + in: body + name: request required: true - type: integer + schema: + $ref: '#/definitions/storage.createFichierStorageRequest' produces: - application/json responses: - "204": - description: No Content + "200": + description: OK + schema: + $ref: '#/definitions/model.Storage' "400": description: Bad Request schema: @@ -12289,33 +17117,28 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Delete a specific schedule + summary: Create Fichier storage tags: - - Deal 
Schedule - patch: + - Storage + /storage/filefabric: + post: consumes: - application/json - description: Update a schedule - operationId: UpdateSchedule + operationId: CreateFilefabricStorage parameters: - - description: Schedule ID - in: path - name: id - required: true - type: integer - - description: Update request + - description: Request body in: body - name: body + name: request required: true schema: - $ref: '#/definitions/schedule.UpdateRequest' + $ref: '#/definitions/storage.createFilefabricStorageRequest' produces: - application/json responses: "200": description: OK schema: - $ref: '#/definitions/model.Schedule' + $ref: '#/definitions/model.Storage' "400": description: Bad Request schema: @@ -12324,25 +17147,28 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Update a schedule + summary: Create Filefabric storage tags: - - Deal Schedule - /schedule/{id}/pause: + - Storage + /storage/ftp: post: - operationId: PauseSchedule + consumes: + - application/json + operationId: CreateFtpStorage parameters: - - description: Schedule ID - in: path - name: id + - description: Request body + in: body + name: request required: true - type: integer + schema: + $ref: '#/definitions/storage.createFtpStorageRequest' produces: - application/json responses: "200": description: OK schema: - $ref: '#/definitions/model.Schedule' + $ref: '#/definitions/model.Storage' "400": description: Bad Request schema: @@ -12351,25 +17177,28 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Pause a specific schedule + summary: Create Ftp storage tags: - - Deal Schedule - /schedule/{id}/resume: + - Storage + /storage/gcs: post: - operationId: ResumeSchedule + consumes: + - application/json + operationId: CreateGcsStorage parameters: - - description: Schedule ID - in: path - name: id + - description: Request body + in: body + name: request required: true - type: integer + schema: + $ref: 
'#/definitions/storage.createGcsStorageRequest' produces: - application/json responses: "200": description: OK schema: - $ref: '#/definitions/model.Schedule' + $ref: '#/definitions/model.Storage' "400": description: Bad Request schema: @@ -12378,29 +17207,28 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Resume a specific schedule + summary: Create Gcs storage tags: - - Deal Schedule - /send_deal: + - Storage + /storage/gphotos: post: consumes: - application/json - description: Send a manual deal proposal - operationId: SendManual + operationId: CreateGphotosStorage parameters: - - description: Proposal + - description: Request body in: body - name: proposal + name: request required: true schema: - $ref: '#/definitions/deal.Proposal' + $ref: '#/definitions/storage.createGphotosStorageRequest' produces: - application/json responses: "200": description: OK schema: - $ref: '#/definitions/model.Deal' + $ref: '#/definitions/model.Storage' "400": description: Bad Request schema: @@ -12409,46 +17237,28 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Send a manual deal proposal + summary: Create Gphotos storage tags: - - Deal - /storage: - get: + - Storage + /storage/hdfs: + post: consumes: - application/json - operationId: ListStorages + operationId: CreateHdfsStorage + parameters: + - description: Request body + in: body + name: request + required: true + schema: + $ref: '#/definitions/storage.createHdfsStorageRequest' produces: - application/json responses: "200": description: OK schema: - items: - $ref: '#/definitions/model.Storage' - type: array - "400": - description: Bad Request - schema: - $ref: '#/definitions/api.HTTPError' - "500": - description: Internal Server Error - schema: - $ref: '#/definitions/api.HTTPError' - summary: List all storages - tags: - - Storage - /storage/{name}: - delete: - operationId: RemoveStorage - parameters: - - description: Storage 
ID or name - in: path - name: name - required: true - type: string - responses: - "204": - description: No Content + $ref: '#/definitions/model.Storage' "400": description: Bad Request schema: @@ -12457,27 +17267,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Remove a storage + summary: Create Hdfs storage tags: - Storage - patch: + /storage/hidrive: + post: consumes: - application/json - operationId: UpdateStorage + operationId: CreateHidriveStorage parameters: - - description: Storage ID or name - in: path - name: name - required: true - type: string - - description: Configuration + - description: Request body in: body - name: config + name: request required: true schema: - additionalProperties: - type: string - type: object + $ref: '#/definitions/storage.createHidriveStorageRequest' produces: - application/json responses: @@ -12493,34 +17297,28 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Update a storage connection + summary: Create Hidrive storage tags: - Storage - /storage/{name}/explore/{path}: - get: + /storage/http: + post: consumes: - application/json - operationId: ExploreStorage + operationId: CreateHttpStorage parameters: - - description: Storage ID or name - in: path - name: name - required: true - type: string - - description: Path in the storage system to explore - in: path - name: path + - description: Request body + in: body + name: request required: true - type: string + schema: + $ref: '#/definitions/storage.createHttpStorageRequest' produces: - application/json responses: "200": description: OK schema: - items: - $ref: '#/definitions/storage.DirEntry' - type: array + $ref: '#/definitions/model.Storage' "400": description: Bad Request schema: @@ -12529,26 +17327,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Explore directory entries in a storage system + summary: Create Http 
storage tags: - Storage - /storage/{name}/rename: - patch: + /storage/internetarchive: + post: consumes: - - application/json - operationId: RenameStorage - parameters: - - description: Storage ID or name - in: path - name: name - required: true - type: string - - description: New storage name + - application/json + operationId: CreateInternetarchiveStorage + parameters: + - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.RenameRequest' + $ref: '#/definitions/storage.createInternetarchiveStorageRequest' produces: - application/json responses: @@ -12564,21 +17357,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Rename a storage connection + summary: Create Internetarchive storage tags: - Storage - /storage/azureblob: + /storage/jottacloud: post: consumes: - application/json - operationId: CreateAzureblobStorage + operationId: CreateJottacloudStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createAzureblobStorageRequest' + $ref: '#/definitions/storage.createJottacloudStorageRequest' produces: - application/json responses: @@ -12594,21 +17387,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create Azureblob storage + summary: Create Jottacloud storage tags: - Storage - /storage/b2: + /storage/koofr/digistorage: post: consumes: - application/json - operationId: CreateB2Storage + operationId: CreateKoofrDigistorageStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createB2StorageRequest' + $ref: '#/definitions/storage.createKoofrDigistorageStorageRequest' produces: - application/json responses: @@ -12624,21 +17417,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create B2 storage + summary: Create Koofr 
storage with digistorage - Digi Storage, https://storage.rcs-rds.ro/ tags: - Storage - /storage/box: + /storage/koofr/koofr: post: consumes: - application/json - operationId: CreateBoxStorage + operationId: CreateKoofrKoofrStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createBoxStorageRequest' + $ref: '#/definitions/storage.createKoofrKoofrStorageRequest' produces: - application/json responses: @@ -12654,21 +17447,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create Box storage + summary: Create Koofr storage with koofr - Koofr, https://app.koofr.net/ tags: - Storage - /storage/drive: + /storage/koofr/other: post: consumes: - application/json - operationId: CreateDriveStorage + operationId: CreateKoofrOtherStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createDriveStorageRequest' + $ref: '#/definitions/storage.createKoofrOtherStorageRequest' produces: - application/json responses: @@ -12684,21 +17477,22 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create Drive storage + summary: Create Koofr storage with other - Any other Koofr API compatible storage + service tags: - Storage - /storage/dropbox: + /storage/local: post: consumes: - application/json - operationId: CreateDropboxStorage + operationId: CreateLocalStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createDropboxStorageRequest' + $ref: '#/definitions/storage.createLocalStorageRequest' produces: - application/json responses: @@ -12714,21 +17508,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create Dropbox storage + summary: Create Local storage tags: - Storage - /storage/fichier: + /storage/mailru: post: consumes: - 
application/json - operationId: CreateFichierStorage + operationId: CreateMailruStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createFichierStorageRequest' + $ref: '#/definitions/storage.createMailruStorageRequest' produces: - application/json responses: @@ -12744,21 +17538,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create Fichier storage + summary: Create Mailru storage tags: - Storage - /storage/filefabric: + /storage/mega: post: consumes: - application/json - operationId: CreateFilefabricStorage + operationId: CreateMegaStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createFilefabricStorageRequest' + $ref: '#/definitions/storage.createMegaStorageRequest' produces: - application/json responses: @@ -12774,21 +17568,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create Filefabric storage + summary: Create Mega storage tags: - Storage - /storage/ftp: + /storage/netstorage: post: consumes: - application/json - operationId: CreateFtpStorage + operationId: CreateNetstorageStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createFtpStorageRequest' + $ref: '#/definitions/storage.createNetstorageStorageRequest' produces: - application/json responses: @@ -12804,21 +17598,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create Ftp storage + summary: Create Netstorage storage tags: - Storage - /storage/gcs: + /storage/onedrive: post: consumes: - application/json - operationId: CreateGcsStorage + operationId: CreateOnedriveStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createGcsStorageRequest' + $ref: 
'#/definitions/storage.createOnedriveStorageRequest' produces: - application/json responses: @@ -12834,21 +17628,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create Gcs storage + summary: Create Onedrive storage tags: - Storage - /storage/gphotos: + /storage/oos/env_auth: post: consumes: - application/json - operationId: CreateGphotosStorage + operationId: CreateOosEnv_authStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createGphotosStorageRequest' + $ref: '#/definitions/storage.createOosEnv_authStorageRequest' produces: - application/json responses: @@ -12864,21 +17658,22 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create Gphotos storage + summary: Create Oos storage with env_auth - automatically pickup the credentials + from runtime(env), first one to provide auth wins tags: - Storage - /storage/hdfs: + /storage/oos/instance_principal_auth: post: consumes: - application/json - operationId: CreateHdfsStorage + operationId: CreateOosInstance_principal_authStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createHdfsStorageRequest' + $ref: '#/definitions/storage.createOosInstance_principal_authStorageRequest' produces: - application/json responses: @@ -12894,21 +17689,22 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create Hdfs storage + summary: Create Oos storage with instance_principal_auth - use instance principals + to authorize an instance to make API calls. 
tags: - Storage - /storage/hidrive: + /storage/oos/no_auth: post: consumes: - application/json - operationId: CreateHidriveStorage + operationId: CreateOosNo_authStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createHidriveStorageRequest' + $ref: '#/definitions/storage.createOosNo_authStorageRequest' produces: - application/json responses: @@ -12924,21 +17720,22 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create Hidrive storage + summary: Create Oos storage with no_auth - no credentials needed, this is typically + for reading public buckets tags: - Storage - /storage/http: + /storage/oos/resource_principal_auth: post: consumes: - application/json - operationId: CreateHttpStorage + operationId: CreateOosResource_principal_authStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createHttpStorageRequest' + $ref: '#/definitions/storage.createOosResource_principal_authStorageRequest' produces: - application/json responses: @@ -12954,21 +17751,22 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create Http storage + summary: Create Oos storage with resource_principal_auth - use resource principals + to make API calls tags: - Storage - /storage/internetarchive: + /storage/oos/user_principal_auth: post: consumes: - application/json - operationId: CreateInternetarchiveStorage + operationId: CreateOosUser_principal_authStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createInternetarchiveStorageRequest' + $ref: '#/definitions/storage.createOosUser_principal_authStorageRequest' produces: - application/json responses: @@ -12984,21 +17782,22 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create 
Internetarchive storage + summary: Create Oos storage with user_principal_auth - use an OCI user and an + API key for authentication. tags: - Storage - /storage/jottacloud: + /storage/oos/workload_identity_auth: post: consumes: - application/json - operationId: CreateJottacloudStorage + operationId: CreateOosWorkload_identity_authStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createJottacloudStorageRequest' + $ref: '#/definitions/storage.createOosWorkload_identity_authStorageRequest' produces: - application/json responses: @@ -13014,21 +17813,23 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create Jottacloud storage + summary: Create Oos storage with workload_identity_auth - use workload identity + to grant OCI Container Engine for Kubernetes workloads policy-driven access + to OCI resources using OCI Identity and Access Management (IAM). tags: - Storage - /storage/koofr/digistorage: + /storage/opendrive: post: consumes: - application/json - operationId: CreateKoofrDigistorageStorage + operationId: CreateOpendriveStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createKoofrDigistorageStorageRequest' + $ref: '#/definitions/storage.createOpendriveStorageRequest' produces: - application/json responses: @@ -13044,21 +17845,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create Koofr storage with digistorage - Digi Storage, https://storage.rcs-rds.ro/ + summary: Create Opendrive storage tags: - Storage - /storage/koofr/koofr: + /storage/pcloud: post: consumes: - application/json - operationId: CreateKoofrKoofrStorage + operationId: CreatePcloudStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createKoofrKoofrStorageRequest' + $ref: 
'#/definitions/storage.createPcloudStorageRequest' produces: - application/json responses: @@ -13074,21 +17875,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create Koofr storage with koofr - Koofr, https://app.koofr.net/ + summary: Create Pcloud storage tags: - Storage - /storage/koofr/other: + /storage/premiumizeme: post: consumes: - application/json - operationId: CreateKoofrOtherStorage + operationId: CreatePremiumizemeStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createKoofrOtherStorageRequest' + $ref: '#/definitions/storage.createPremiumizemeStorageRequest' produces: - application/json responses: @@ -13104,22 +17905,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create Koofr storage with other - Any other Koofr API compatible storage - service + summary: Create Premiumizeme storage tags: - Storage - /storage/local: + /storage/putio: post: consumes: - application/json - operationId: CreateLocalStorage + operationId: CreatePutioStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createLocalStorageRequest' + $ref: '#/definitions/storage.createPutioStorageRequest' produces: - application/json responses: @@ -13135,21 +17935,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create Local storage + summary: Create Putio storage tags: - Storage - /storage/mailru: + /storage/qingstor: post: consumes: - application/json - operationId: CreateMailruStorage + operationId: CreateQingstorStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createMailruStorageRequest' + $ref: '#/definitions/storage.createQingstorStorageRequest' produces: - application/json responses: @@ -13165,21 +17965,21 @@ 
paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create Mailru storage + summary: Create Qingstor storage tags: - Storage - /storage/mega: + /storage/s3/alibaba: post: consumes: - application/json - operationId: CreateMegaStorage + operationId: CreateS3AlibabaStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createMegaStorageRequest' + $ref: '#/definitions/storage.createS3AlibabaStorageRequest' produces: - application/json responses: @@ -13195,21 +17995,22 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create Mega storage + summary: Create S3 storage with Alibaba - Alibaba Cloud Object Storage System + (OSS) formerly Aliyun tags: - Storage - /storage/netstorage: + /storage/s3/arvancloud: post: consumes: - application/json - operationId: CreateNetstorageStorage + operationId: CreateS3ArvanCloudStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createNetstorageStorageRequest' + $ref: '#/definitions/storage.createS3ArvanCloudStorageRequest' produces: - application/json responses: @@ -13225,21 +18026,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create Netstorage storage + summary: Create S3 storage with ArvanCloud - Arvan Cloud Object Storage (AOS) tags: - Storage - /storage/onedrive: + /storage/s3/aws: post: consumes: - application/json - operationId: CreateOnedriveStorage + operationId: CreateS3AWSStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createOnedriveStorageRequest' + $ref: '#/definitions/storage.createS3AWSStorageRequest' produces: - application/json responses: @@ -13255,21 +18056,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - 
summary: Create Onedrive storage + summary: Create S3 storage with AWS - Amazon Web Services (AWS) S3 tags: - Storage - /storage/oos/env_auth: + /storage/s3/bizflycloud: post: consumes: - application/json - operationId: CreateOosEnv_authStorage + operationId: CreateS3BizflyCloudStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createOosEnv_authStorageRequest' + $ref: '#/definitions/storage.createS3BizflyCloudStorageRequest' produces: - application/json responses: @@ -13285,22 +18086,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create Oos storage with env_auth - automatically pickup the credentials - from runtime(env), first one to provide auth wins + summary: Create S3 storage with BizflyCloud - Bizfly Cloud Simple Storage tags: - Storage - /storage/oos/instance_principal_auth: + /storage/s3/ceph: post: consumes: - application/json - operationId: CreateOosInstance_principal_authStorage + operationId: CreateS3CephStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createOosInstance_principal_authStorageRequest' + $ref: '#/definitions/storage.createS3CephStorageRequest' produces: - application/json responses: @@ -13316,22 +18116,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create Oos storage with instance_principal_auth - use instance principals - to authorize an instance to make API calls. 
+ summary: Create S3 storage with Ceph - Ceph Object Storage tags: - Storage - /storage/oos/no_auth: + /storage/s3/chinamobile: post: consumes: - application/json - operationId: CreateOosNo_authStorage + operationId: CreateS3ChinaMobileStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createOosNo_authStorageRequest' + $ref: '#/definitions/storage.createS3ChinaMobileStorageRequest' produces: - application/json responses: @@ -13347,22 +18146,22 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create Oos storage with no_auth - no credentials needed, this is typically - for reading public buckets + summary: Create S3 storage with ChinaMobile - China Mobile Ecloud Elastic Object + Storage (EOS) tags: - Storage - /storage/oos/resource_principal_auth: + /storage/s3/cloudflare: post: consumes: - application/json - operationId: CreateOosResource_principal_authStorage + operationId: CreateS3CloudflareStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createOosResource_principal_authStorageRequest' + $ref: '#/definitions/storage.createS3CloudflareStorageRequest' produces: - application/json responses: @@ -13378,22 +18177,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create Oos storage with resource_principal_auth - use resource principals - to make API calls + summary: Create S3 storage with Cloudflare - Cloudflare R2 Storage tags: - Storage - /storage/oos/user_principal_auth: + /storage/s3/cubbit: post: consumes: - application/json - operationId: CreateOosUser_principal_authStorage + operationId: CreateS3CubbitStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createOosUser_principal_authStorageRequest' + $ref: 
'#/definitions/storage.createS3CubbitStorageRequest' produces: - application/json responses: @@ -13409,22 +18207,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create Oos storage with user_principal_auth - use an OCI user and an - API key for authentication. + summary: Create S3 storage with Cubbit - Cubbit DS3 Object Storage tags: - Storage - /storage/oos/workload_identity_auth: + /storage/s3/digitalocean: post: consumes: - application/json - operationId: CreateOosWorkload_identity_authStorage + operationId: CreateS3DigitalOceanStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createOosWorkload_identity_authStorageRequest' + $ref: '#/definitions/storage.createS3DigitalOceanStorageRequest' produces: - application/json responses: @@ -13440,23 +18237,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create Oos storage with workload_identity_auth - use workload identity - to grant OCI Container Engine for Kubernetes workloads policy-driven access - to OCI resources using OCI Identity and Access Management (IAM). 
+ summary: Create S3 storage with DigitalOcean - DigitalOcean Spaces tags: - Storage - /storage/opendrive: + /storage/s3/dreamhost: post: consumes: - application/json - operationId: CreateOpendriveStorage + operationId: CreateS3DreamhostStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createOpendriveStorageRequest' + $ref: '#/definitions/storage.createS3DreamhostStorageRequest' produces: - application/json responses: @@ -13472,21 +18267,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create Opendrive storage + summary: Create S3 storage with Dreamhost - Dreamhost DreamObjects tags: - Storage - /storage/pcloud: + /storage/s3/exaba: post: consumes: - application/json - operationId: CreatePcloudStorage + operationId: CreateS3ExabaStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createPcloudStorageRequest' + $ref: '#/definitions/storage.createS3ExabaStorageRequest' produces: - application/json responses: @@ -13502,21 +18297,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create Pcloud storage + summary: Create S3 storage with Exaba - Exaba Object Storage tags: - Storage - /storage/premiumizeme: + /storage/s3/filelu: post: consumes: - application/json - operationId: CreatePremiumizemeStorage + operationId: CreateS3FileLuStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createPremiumizemeStorageRequest' + $ref: '#/definitions/storage.createS3FileLuStorageRequest' produces: - application/json responses: @@ -13532,21 +18327,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create Premiumizeme storage + summary: Create S3 storage with FileLu - FileLu S5 (S3-Compatible Object Storage) tags: - 
Storage - /storage/putio: + /storage/s3/flashblade: post: consumes: - application/json - operationId: CreatePutioStorage + operationId: CreateS3FlashBladeStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createPutioStorageRequest' + $ref: '#/definitions/storage.createS3FlashBladeStorageRequest' produces: - application/json responses: @@ -13562,21 +18357,22 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create Putio storage + summary: Create S3 storage with FlashBlade - Pure Storage FlashBlade Object + Storage tags: - Storage - /storage/qingstor: + /storage/s3/gcs: post: consumes: - application/json - operationId: CreateQingstorStorage + operationId: CreateS3GCSStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createQingstorStorageRequest' + $ref: '#/definitions/storage.createS3GCSStorageRequest' produces: - application/json responses: @@ -13592,21 +18388,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create Qingstor storage + summary: Create S3 storage with GCS - Google Cloud Storage tags: - Storage - /storage/s3/alibaba: + /storage/s3/hetzner: post: consumes: - application/json - operationId: CreateS3AlibabaStorage + operationId: CreateS3HetznerStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createS3AlibabaStorageRequest' + $ref: '#/definitions/storage.createS3HetznerStorageRequest' produces: - application/json responses: @@ -13622,22 +18418,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create S3 storage with Alibaba - Alibaba Cloud Object Storage System - (OSS) formerly Aliyun + summary: Create S3 storage with Hetzner - Hetzner Object Storage tags: - Storage - 
/storage/s3/arvancloud: + /storage/s3/huaweiobs: post: consumes: - application/json - operationId: CreateS3ArvanCloudStorage + operationId: CreateS3HuaweiOBSStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createS3ArvanCloudStorageRequest' + $ref: '#/definitions/storage.createS3HuaweiOBSStorageRequest' produces: - application/json responses: @@ -13653,21 +18448,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create S3 storage with ArvanCloud - Arvan Cloud Object Storage (AOS) + summary: Create S3 storage with HuaweiOBS - Huawei Object Storage Service tags: - Storage - /storage/s3/aws: + /storage/s3/ibmcos: post: consumes: - application/json - operationId: CreateS3AWSStorage + operationId: CreateS3IBMCOSStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createS3AWSStorageRequest' + $ref: '#/definitions/storage.createS3IBMCOSStorageRequest' produces: - application/json responses: @@ -13683,21 +18478,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create S3 storage with AWS - Amazon Web Services (AWS) S3 + summary: Create S3 storage with IBMCOS - IBM COS S3 tags: - Storage - /storage/s3/ceph: + /storage/s3/idrive: post: consumes: - application/json - operationId: CreateS3CephStorage + operationId: CreateS3IDriveStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createS3CephStorageRequest' + $ref: '#/definitions/storage.createS3IDriveStorageRequest' produces: - application/json responses: @@ -13713,21 +18508,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create S3 storage with Ceph - Ceph Object Storage + summary: Create S3 storage with IDrive - IDrive e2 tags: - Storage - 
/storage/s3/chinamobile: + /storage/s3/intercolo: post: consumes: - application/json - operationId: CreateS3ChinaMobileStorage + operationId: CreateS3IntercoloStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createS3ChinaMobileStorageRequest' + $ref: '#/definitions/storage.createS3IntercoloStorageRequest' produces: - application/json responses: @@ -13743,22 +18538,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create S3 storage with ChinaMobile - China Mobile Ecloud Elastic Object - Storage (EOS) + summary: Create S3 storage with Intercolo - Intercolo Object Storage tags: - Storage - /storage/s3/cloudflare: + /storage/s3/ionos: post: consumes: - application/json - operationId: CreateS3CloudflareStorage + operationId: CreateS3IONOSStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createS3CloudflareStorageRequest' + $ref: '#/definitions/storage.createS3IONOSStorageRequest' produces: - application/json responses: @@ -13774,21 +18568,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create S3 storage with Cloudflare - Cloudflare R2 Storage + summary: Create S3 storage with IONOS - IONOS Cloud tags: - Storage - /storage/s3/digitalocean: + /storage/s3/leviia: post: consumes: - application/json - operationId: CreateS3DigitalOceanStorage + operationId: CreateS3LeviiaStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createS3DigitalOceanStorageRequest' + $ref: '#/definitions/storage.createS3LeviiaStorageRequest' produces: - application/json responses: @@ -13804,21 +18598,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create S3 storage with DigitalOcean - DigitalOcean Spaces + summary: Create 
S3 storage with Leviia - Leviia Object Storage tags: - Storage - /storage/s3/dreamhost: + /storage/s3/liara: post: consumes: - application/json - operationId: CreateS3DreamhostStorage + operationId: CreateS3LiaraStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createS3DreamhostStorageRequest' + $ref: '#/definitions/storage.createS3LiaraStorageRequest' produces: - application/json responses: @@ -13834,21 +18628,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create S3 storage with Dreamhost - Dreamhost DreamObjects + summary: Create S3 storage with Liara - Liara Object Storage tags: - Storage - /storage/s3/gcs: + /storage/s3/linode: post: consumes: - application/json - operationId: CreateS3GCSStorage + operationId: CreateS3LinodeStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createS3GCSStorageRequest' + $ref: '#/definitions/storage.createS3LinodeStorageRequest' produces: - application/json responses: @@ -13864,21 +18658,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create S3 storage with GCS - Google Cloud Storage + summary: Create S3 storage with Linode - Linode Object Storage tags: - Storage - /storage/s3/huaweiobs: + /storage/s3/lyvecloud: post: consumes: - application/json - operationId: CreateS3HuaweiOBSStorage + operationId: CreateS3LyveCloudStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createS3HuaweiOBSStorageRequest' + $ref: '#/definitions/storage.createS3LyveCloudStorageRequest' produces: - application/json responses: @@ -13894,21 +18688,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create S3 storage with HuaweiOBS - Huawei Object Storage Service + summary: 
Create S3 storage with LyveCloud - Seagate Lyve Cloud tags: - Storage - /storage/s3/ibmcos: + /storage/s3/magalu: post: consumes: - application/json - operationId: CreateS3IBMCOSStorage + operationId: CreateS3MagaluStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createS3IBMCOSStorageRequest' + $ref: '#/definitions/storage.createS3MagaluStorageRequest' produces: - application/json responses: @@ -13924,21 +18718,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create S3 storage with IBMCOS - IBM COS S3 + summary: Create S3 storage with Magalu - Magalu Object Storage tags: - Storage - /storage/s3/idrive: + /storage/s3/mega: post: consumes: - application/json - operationId: CreateS3IDriveStorage + operationId: CreateS3MegaStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createS3IDriveStorageRequest' + $ref: '#/definitions/storage.createS3MegaStorageRequest' produces: - application/json responses: @@ -13954,21 +18748,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create S3 storage with IDrive - IDrive e2 + summary: Create S3 storage with Mega - MEGA S4 Object Storage tags: - Storage - /storage/s3/ionos: + /storage/s3/minio: post: consumes: - application/json - operationId: CreateS3IONOSStorage + operationId: CreateS3MinioStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createS3IONOSStorageRequest' + $ref: '#/definitions/storage.createS3MinioStorageRequest' produces: - application/json responses: @@ -13984,21 +18778,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create S3 storage with IONOS - IONOS Cloud + summary: Create S3 storage with Minio - Minio Object Storage tags: - Storage - 
/storage/s3/leviia: + /storage/s3/netease: post: consumes: - application/json - operationId: CreateS3LeviiaStorage + operationId: CreateS3NeteaseStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createS3LeviiaStorageRequest' + $ref: '#/definitions/storage.createS3NeteaseStorageRequest' produces: - application/json responses: @@ -14014,21 +18808,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create S3 storage with Leviia - Leviia Object Storage + summary: Create S3 storage with Netease - Netease Object Storage (NOS) tags: - Storage - /storage/s3/liara: + /storage/s3/other: post: consumes: - application/json - operationId: CreateS3LiaraStorage + operationId: CreateS3OtherStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createS3LiaraStorageRequest' + $ref: '#/definitions/storage.createS3OtherStorageRequest' produces: - application/json responses: @@ -14044,21 +18838,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create S3 storage with Liara - Liara Object Storage + summary: Create S3 storage with Other - Any other S3 compatible provider tags: - Storage - /storage/s3/linode: + /storage/s3/outscale: post: consumes: - application/json - operationId: CreateS3LinodeStorage + operationId: CreateS3OutscaleStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createS3LinodeStorageRequest' + $ref: '#/definitions/storage.createS3OutscaleStorageRequest' produces: - application/json responses: @@ -14074,21 +18868,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create S3 storage with Linode - Linode Object Storage + summary: Create S3 storage with Outscale - OUTSCALE Object Storage (OOS) tags: - 
Storage - /storage/s3/lyvecloud: + /storage/s3/ovhcloud: post: consumes: - application/json - operationId: CreateS3LyveCloudStorage + operationId: CreateS3OVHcloudStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createS3LyveCloudStorageRequest' + $ref: '#/definitions/storage.createS3OVHcloudStorageRequest' produces: - application/json responses: @@ -14104,21 +18898,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create S3 storage with LyveCloud - Seagate Lyve Cloud + summary: Create S3 storage with OVHcloud - OVHcloud Object Storage tags: - Storage - /storage/s3/magalu: + /storage/s3/petabox: post: consumes: - application/json - operationId: CreateS3MagaluStorage + operationId: CreateS3PetaboxStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createS3MagaluStorageRequest' + $ref: '#/definitions/storage.createS3PetaboxStorageRequest' produces: - application/json responses: @@ -14134,21 +18928,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create S3 storage with Magalu - Magalu Object Storage + summary: Create S3 storage with Petabox - Petabox Object Storage tags: - Storage - /storage/s3/minio: + /storage/s3/qiniu: post: consumes: - application/json - operationId: CreateS3MinioStorage + operationId: CreateS3QiniuStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createS3MinioStorageRequest' + $ref: '#/definitions/storage.createS3QiniuStorageRequest' produces: - application/json responses: @@ -14164,21 +18958,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create S3 storage with Minio - Minio Object Storage + summary: Create S3 storage with Qiniu - Qiniu Object Storage (Kodo) tags: - 
Storage - /storage/s3/netease: + /storage/s3/rabata: post: consumes: - application/json - operationId: CreateS3NeteaseStorage + operationId: CreateS3RabataStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createS3NeteaseStorageRequest' + $ref: '#/definitions/storage.createS3RabataStorageRequest' produces: - application/json responses: @@ -14194,21 +18988,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create S3 storage with Netease - Netease Object Storage (NOS) + summary: Create S3 storage with Rabata - Rabata Cloud Storage tags: - Storage - /storage/s3/other: + /storage/s3/rackcorp: post: consumes: - application/json - operationId: CreateS3OtherStorage + operationId: CreateS3RackCorpStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createS3OtherStorageRequest' + $ref: '#/definitions/storage.createS3RackCorpStorageRequest' produces: - application/json responses: @@ -14224,21 +19018,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create S3 storage with Other - Any other S3 compatible provider + summary: Create S3 storage with RackCorp - RackCorp Object Storage tags: - Storage - /storage/s3/petabox: + /storage/s3/rclone: post: consumes: - application/json - operationId: CreateS3PetaboxStorage + operationId: CreateS3RcloneStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createS3PetaboxStorageRequest' + $ref: '#/definitions/storage.createS3RcloneStorageRequest' produces: - application/json responses: @@ -14254,21 +19048,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create S3 storage with Petabox - Petabox Object Storage + summary: Create S3 storage with Rclone - Rclone S3 Server 
tags: - Storage - /storage/s3/qiniu: + /storage/s3/scaleway: post: consumes: - application/json - operationId: CreateS3QiniuStorage + operationId: CreateS3ScalewayStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createS3QiniuStorageRequest' + $ref: '#/definitions/storage.createS3ScalewayStorageRequest' produces: - application/json responses: @@ -14284,21 +19078,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create S3 storage with Qiniu - Qiniu Object Storage (Kodo) + summary: Create S3 storage with Scaleway - Scaleway Object Storage tags: - Storage - /storage/s3/rackcorp: + /storage/s3/seaweedfs: post: consumes: - application/json - operationId: CreateS3RackCorpStorage + operationId: CreateS3SeaweedFSStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createS3RackCorpStorageRequest' + $ref: '#/definitions/storage.createS3SeaweedFSStorageRequest' produces: - application/json responses: @@ -14314,21 +19108,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create S3 storage with RackCorp - RackCorp Object Storage + summary: Create S3 storage with SeaweedFS - SeaweedFS S3 tags: - Storage - /storage/s3/rclone: + /storage/s3/selectel: post: consumes: - application/json - operationId: CreateS3RcloneStorage + operationId: CreateS3SelectelStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createS3RcloneStorageRequest' + $ref: '#/definitions/storage.createS3SelectelStorageRequest' produces: - application/json responses: @@ -14344,21 +19138,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create S3 storage with Rclone - Rclone S3 Server + summary: Create S3 storage with Selectel - Selectel Object 
Storage tags: - Storage - /storage/s3/scaleway: + /storage/s3/servercore: post: consumes: - application/json - operationId: CreateS3ScalewayStorage + operationId: CreateS3ServercoreStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createS3ScalewayStorageRequest' + $ref: '#/definitions/storage.createS3ServercoreStorageRequest' produces: - application/json responses: @@ -14374,21 +19168,21 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create S3 storage with Scaleway - Scaleway Object Storage + summary: Create S3 storage with Servercore - Servercore Object Storage tags: - Storage - /storage/s3/seaweedfs: + /storage/s3/spectralogic: post: consumes: - application/json - operationId: CreateS3SeaweedFSStorage + operationId: CreateS3SpectraLogicStorage parameters: - description: Request body in: body name: request required: true schema: - $ref: '#/definitions/storage.createS3SeaweedFSStorageRequest' + $ref: '#/definitions/storage.createS3SpectraLogicStorageRequest' produces: - application/json responses: @@ -14404,7 +19198,7 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Create S3 storage with SeaweedFS - SeaweedFS S3 + summary: Create S3 storage with SpectraLogic - Spectra Logic Black Pearl tags: - Storage /storage/s3/stackpath: @@ -14557,6 +19351,36 @@ paths: summary: Create S3 storage with Wasabi - Wasabi Object Storage tags: - Storage + /storage/s3/zata: + post: + consumes: + - application/json + operationId: CreateS3ZataStorage + parameters: + - description: Request body + in: body + name: request + required: true + schema: + $ref: '#/definitions/storage.createS3ZataStorageRequest' + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/model.Storage' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.HTTPError' + 
"500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.HTTPError' + summary: Create S3 storage with Zata - Zata (S3 compatible Gateway) + tags: + - Storage /storage/seafile: post: consumes: @@ -14858,36 +19682,6 @@ paths: summary: Create Union storage tags: - Storage - /storage/uptobox: - post: - consumes: - - application/json - operationId: CreateUptoboxStorage - parameters: - - description: Request body - in: body - name: request - required: true - schema: - $ref: '#/definitions/storage.createUptoboxStorageRequest' - produces: - - application/json - responses: - "200": - description: OK - schema: - $ref: '#/definitions/model.Storage' - "400": - description: Bad Request - schema: - $ref: '#/definitions/api.HTTPError' - "500": - description: Internal Server Error - schema: - $ref: '#/definitions/api.HTTPError' - summary: Create Uptobox storage - tags: - - Storage /storage/webdav: post: consumes: diff --git a/docs/zh/SUMMARY.md b/docs/zh/SUMMARY.md index 821d3d45e..ecf77f563 100644 --- a/docs/zh/SUMMARY.md +++ b/docs/zh/SUMMARY.md @@ -137,7 +137,6 @@ * [新建](cli-reference/storage/create/storj/new.md) * [蓝奏云](cli-reference/storage/create/sugarsync.md) * [Swift](cli-reference/storage/create/swift.md) - * [Uptobox](cli-reference/storage/create/uptobox.md) * [Webdav](cli-reference/storage/create/webdav.md) * [Yandex](cli-reference/storage/create/yandex.md) * [Zoho](cli-reference/storage/create/zoho.md) @@ -217,7 +216,6 @@ * [新建](cli-reference/storage/update/storj/new.md) * [蓝奏云](cli-reference/storage/update/sugarsync.md) * [Swift](cli-reference/storage/update/swift.md) - * [Uptobox](cli-reference/storage/update/uptobox.md) * [Webdav](cli-reference/storage/update/webdav.md) * [Yandex](cli-reference/storage/update/yandex.md) * [Zoho](cli-reference/storage/update/zoho.md) diff --git a/docs/zh/cli-reference/run/content-provider.md b/docs/zh/cli-reference/run/content-provider.md index 5b4b27c83..1585416af 100644 --- 
a/docs/zh/cli-reference/run/content-provider.md +++ b/docs/zh/cli-reference/run/content-provider.md @@ -11,11 +11,9 @@ 选项: --help, -h 显示帮助信息 - Bitswap检索 + HTTP IPFS Gateway - --enable-bitswap 启用bitswap检索(默认:false) - --libp2p-identity-key value libp2p对等节点的base64编码私钥(默认:自动生成) - --libp2p-listen value [ --libp2p-listen value ] 用于libp2p连接的监听地址 + --enable-http-ipfs Enable trustless IPFS gateway on /ipfs/ (default: true) HTTP检索 diff --git a/docs/zh/cli-reference/run/download-server.md b/docs/zh/cli-reference/run/download-server.md index 709206116..d3969deb9 100644 --- a/docs/zh/cli-reference/run/download-server.md +++ b/docs/zh/cli-reference/run/download-server.md @@ -213,10 +213,6 @@ NAME: --sugarsync-private-access-key value Sugarsync私有访问密钥。 [$SUGARSYNC_PRIVATE_ACCESS_KEY] --sugarsync-refresh-token value Sugarsync刷新令牌。 [$SUGARSYNC_REFRESH_TOKEN] - Uptobox - - --uptobox-access-token value 你的访问令牌。 [$UPTOBOX_ACCESS_TOKEN] - WebDAV --webdav-bearer-token value 用户/密码代替带有基本令牌的令牌(如Macaroon)。 [$WEBDAV_BEARER_TOKEN] diff --git a/docs/zh/cli-reference/storage/create/README.md b/docs/zh/cli-reference/storage/create/README.md index 5ed74c217..0f2a6d6a7 100644 --- a/docs/zh/cli-reference/storage/create/README.md +++ b/docs/zh/cli-reference/storage/create/README.md @@ -46,7 +46,6 @@ storj Storj 分布式云存储 sugarsync Sugarsync swift OpenStack Swift(Rackspace Cloud Files、Memset Memstore、OVH) - uptobox Uptobox webdav WebDAV yandex Yandex Disk zoho Zoho diff --git a/docs/zh/cli-reference/storage/create/uptobox.md b/docs/zh/cli-reference/storage/create/uptobox.md deleted file mode 100644 index 763a3fa8d..000000000 --- a/docs/zh/cli-reference/storage/create/uptobox.md +++ /dev/null @@ -1,35 +0,0 @@ -# Uptobox - -{% code fullWidth="true" %} -``` -命令名称: - singularity storage create uptobox - Uptobox - -用法: - singularity storage create uptobox [命令选项] [参数...] 
- -描述: - --access-token - 您的访问令牌。 - - 从 https://uptobox.com/my_account 获取。 - - --encoding - 后端的编码。 - - 有关详细信息,请参阅[概述中的编码部分](/overview/#encoding)。 - -选项: - --access-token value 您的访问令牌。[$ACCESS_TOKEN] - --help, -h 显示帮助信息 - - 高级选项 - - --encoding value 后端的编码。(默认值:"Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot") [$ENCODING] - - 通用选项 - - --name value 存储的名称(默认值:自动生成) - --path value 存储的路径 -``` -{% endcode %} \ No newline at end of file diff --git a/docs/zh/cli-reference/storage/update/README.md b/docs/zh/cli-reference/storage/update/README.md index a3531faf3..4de379c1c 100644 --- a/docs/zh/cli-reference/storage/update/README.md +++ b/docs/zh/cli-reference/storage/update/README.md @@ -46,7 +46,6 @@ NAME: storj Storj 分散式云存储 sugarsync Sugarsync swift OpenStack Swift (Rackspace Cloud Files, Memset Memstore, OVH) - uptobox Uptobox webdav WebDAV yandex Yandex Disk zoho Zoho diff --git a/docs/zh/cli-reference/storage/update/uptobox.md b/docs/zh/cli-reference/storage/update/uptobox.md deleted file mode 100644 index 9ca8bc66a..000000000 --- a/docs/zh/cli-reference/storage/update/uptobox.md +++ /dev/null @@ -1,32 +0,0 @@ -# Uptobox - -{% code fullWidth="true" %} -``` -命令名称: - singularity storage update uptobox - Uptobox - -用法: - singularity storage update uptobox [命令选项] <名称|id> - -描述: - --access-token - 您的访问令牌。 - - 从 https://uptobox.com/my_account 获取。 - - --encoding - 后端的编码方式。 - - 有关更多信息,请参阅[概述中的编码部分](/overview/#encoding)。 - - -选项: - --access-token value 您的访问令牌。 [$ACCESS_TOKEN] - --help, -h 显示帮助 - - 高级选项 - - --encoding value 后端的编码方式。 (默认值: "Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot") [$ENCODING] - -``` -{% endcode %} \ No newline at end of file diff --git a/go.mod b/go.mod index ffa7b4bd2..31fa722cb 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/data-preservation-programs/singularity -go 1.24.6 +go 1.25.8 require ( github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b @@ -24,9 +24,9 @@ require ( 
github.com/fxamacker/cbor/v2 v2.8.0 github.com/gammazero/workerpool v1.1.3 github.com/glebarez/sqlite v1.8.0 - github.com/go-openapi/errors v0.20.4 + github.com/go-openapi/errors v0.22.4 github.com/go-openapi/runtime v0.26.0 - github.com/go-openapi/strfmt v0.21.7 + github.com/go-openapi/strfmt v0.25.0 github.com/go-openapi/swag v0.23.0 github.com/go-openapi/validate v0.22.1 github.com/google/uuid v1.6.0 @@ -64,7 +64,7 @@ require ( github.com/multiformats/go-varint v0.1.0 github.com/orlangure/gnomock v0.32.0 github.com/parnurzeal/gorequest v0.2.16 - github.com/rclone/rclone v1.68.0 + github.com/rclone/rclone v1.73.2 github.com/rjNemo/underscore v0.5.0 github.com/robfig/cron/v3 v3.0.1 github.com/sashabaranov/go-openai v1.14.1 @@ -75,7 +75,7 @@ require ( github.com/ybbus/jsonrpc/v3 v3.1.4 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 - golang.org/x/text v0.30.0 + golang.org/x/text v0.34.0 gorm.io/driver/mysql v1.6.0 gorm.io/driver/postgres v1.6.0 gorm.io/driver/sqlite v1.6.0 @@ -88,52 +88,50 @@ require ( cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect cloud.google.com/go/compute/metadata v0.9.0 // indirect filippo.io/edwards25519 v1.1.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3 // indirect - github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.5.3 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect - github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 // indirect - github.com/Files-com/files-sdk-go/v3 v3.2.258 // indirect + github.com/Azure/go-ntlmssp v0.0.2-0.20251110135918-10b7b7e7cd26 // indirect + 
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 // indirect + github.com/IBM/go-sdk-core/v5 v5.18.5 // indirect + github.com/Jorropo/jsync v1.0.1 // indirect github.com/KyleBanks/depth v1.2.1 // indirect github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd // indirect github.com/Microsoft/go-winio v0.6.2 // indirect - github.com/ProtonMail/go-crypto v1.3.0 // indirect - github.com/ProtonMail/gopenpgp/v2 v2.9.0 // indirect - github.com/PuerkitoBio/goquery v1.10.3 // indirect github.com/StackExchange/wmi v1.2.1 // indirect github.com/abbot/go-http-auth v0.4.0 // indirect - github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - github.com/aws/aws-sdk-go-v2 v1.39.4 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2 // indirect - github.com/aws/aws-sdk-go-v2/config v1.31.15 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.18.19 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.11 // indirect - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.15 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.11 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.11 // indirect + github.com/aws/aws-sdk-go-v2 v1.39.6 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.3 // indirect + github.com/aws/aws-sdk-go-v2/config v1.31.17 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.18.21 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13 // indirect + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.4 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.11 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.2 // indirect - 
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.11 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.11 // indirect - github.com/aws/aws-sdk-go-v2/service/s3 v1.88.7 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.29.8 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.3 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.38.9 // indirect - github.com/aws/smithy-go v1.23.1 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.13 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.4 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.13 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.90.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.30.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.5 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.39.1 // indirect + github.com/aws/smithy-go v1.23.2 // indirect + github.com/bahlo/generic-list-go v0.2.0 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bep/debounce v1.2.1 // indirect github.com/bits-and-blooms/bitset v1.13.0 // indirect - github.com/bradenaw/juniper v0.15.3 // indirect + github.com/buger/jsonparser v1.1.1 // indirect github.com/calebcase/tmpfile v1.0.3 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/cloudflare/circl v1.6.1 // indirect + github.com/clipperhouse/stringish v0.1.1 // indirect + github.com/clipperhouse/uax29/v2 v2.3.0 // indirect github.com/cloudsoda/go-smb2 v0.0.0-20250228001242-d4c70e6251cc // indirect github.com/cloudsoda/sddl 
v0.0.0-20250224235906-926454e91efc // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect @@ -158,9 +156,8 @@ require ( github.com/docker/go-connections v0.6.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5 // indirect + github.com/ebitengine/purego v0.9.1 // indirect github.com/elazarl/goproxy v0.0.0-20221015165544-a0805db90819 // indirect - github.com/emersion/go-message v0.18.2 // indirect - github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff // indirect github.com/ethereum/c-kzg-4844 v1.0.0 // indirect github.com/ethereum/go-verkle v0.1.1-0.20240829091221-dffa7562dbe9 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -183,12 +180,13 @@ require ( github.com/flynn/noise v1.1.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.11 // indirect github.com/gammazero/chanqueue v1.1.1 // indirect github.com/gammazero/deque v1.1.0 // indirect github.com/geoffgarside/ber v1.2.0 // indirect github.com/getsentry/sentry-go v0.27.0 // indirect github.com/glebarez/go-sqlite v1.21.1 // indirect - github.com/go-chi/chi/v5 v5.2.3 // indirect + github.com/go-chi/chi/v5 v5.2.5 // indirect github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -198,22 +196,27 @@ require ( github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/loads v0.21.2 // indirect github.com/go-openapi/spec v0.20.9 // indirect - github.com/go-resty/resty/v2 v2.16.5 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-playground/validator/v10 v10.28.0 // indirect github.com/go-sql-driver/mysql v1.9.3 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // 
indirect github.com/goccy/go-json v0.10.5 // indirect github.com/gofrs/flock v0.13.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt v3.2.2+incompatible // indirect github.com/golang-jwt/jwt/v4 v4.5.2 // indirect github.com/golang-jwt/jwt/v5 v5.3.0 // indirect + github.com/google/gopacket v1.1.19 // indirect github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a // indirect github.com/google/s2a-go v0.1.9 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect github.com/googleapis/gax-go/v2 v2.15.0 // indirect github.com/gorilla/websocket v1.5.3 // indirect github.com/hannahhoward/cbor-gen-for v0.0.0-20230214144701-5d17c9d5243c // indirect github.com/hannahhoward/go-pubsub v1.0.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-retryablehttp v0.7.8 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect @@ -230,6 +233,7 @@ require ( github.com/ipfs/go-ipfs-ds-help v1.1.1 // indirect github.com/ipfs/go-ipfs-exchange-interface v0.2.1 // indirect github.com/ipfs/go-ipfs-pq v0.0.3 // indirect + github.com/ipfs/go-ipfs-redirects-file v0.1.2 // indirect github.com/ipfs/go-ipfs-util v0.0.3 // indirect github.com/ipfs/go-log v1.0.5 // indirect github.com/ipfs/go-merkledag v0.11.0 // indirect @@ -250,7 +254,7 @@ require ( github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/now v1.1.5 // indirect - github.com/jlaffaye/ftp v0.2.0 // indirect + github.com/jlaffaye/ftp v0.2.1-0.20240918233326-1b970516f5d3 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/jtolio/noiseconn v0.0.0-20231127013910-f6d9ecbf1de7 // indirect @@ -264,18 +268,27 @@ require ( github.com/kr/text 
v0.2.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/labstack/gommon v0.4.0 // indirect + github.com/lanrat/extsort v1.4.2 // indirect + github.com/leodido/go-urn v1.4.0 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/libp2p/go-cidranger v1.1.0 // indirect + github.com/libp2p/go-doh-resolver v0.5.0 // indirect github.com/libp2p/go-flow-metrics v0.3.0 // indirect github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect + github.com/libp2p/go-libp2p-kad-dht v0.34.0 // indirect + github.com/libp2p/go-libp2p-kbucket v0.7.0 // indirect + github.com/libp2p/go-libp2p-record v0.3.1 // indirect + github.com/libp2p/go-libp2p-routing-helpers v0.7.5 // indirect github.com/libp2p/go-msgio v0.3.0 // indirect github.com/libp2p/go-netroute v0.3.0 // indirect github.com/libp2p/go-reuseport v0.4.0 // indirect github.com/libp2p/go-yamux/v5 v5.1.0 // indirect github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3 // indirect - github.com/mailru/easyjson v0.7.7 // indirect + github.com/mailru/easyjson v0.9.1 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-runewidth v0.0.19 // indirect github.com/mattn/go-sqlite3 v1.14.32 // indirect github.com/miekg/dns v1.1.68 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect @@ -293,7 +306,7 @@ require ( github.com/multiformats/go-multibase v0.2.0 // indirect github.com/multiformats/go-multistream v0.6.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/ncw/swift/v2 v2.0.4 // indirect + github.com/ncw/swift/v2 v2.0.5 // indirect github.com/nxadm/tail v1.4.11 // indirect github.com/oklog/ulid v1.3.1 // indirect github.com/onsi/ginkgo/v2 v2.23.4 // indirect @@ -301,10 +314,11 @@ require ( github.com/opencontainers/go-digest v1.0.0 // indirect 
github.com/opencontainers/image-spec v1.1.1 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect - github.com/oracle/oci-go-sdk/v65 v65.102.1 // indirect + github.com/oracle/oci-go-sdk/v65 v65.104.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14 // indirect github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 // indirect + github.com/peterh/liner v1.2.2 // indirect github.com/pion/datachannel v1.5.10 // indirect github.com/pion/dtls/v2 v2.2.12 // indirect github.com/pion/dtls/v3 v3.0.7 // indirect @@ -333,23 +347,20 @@ require ( github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/prometheus/client_golang v1.23.2 // indirect github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common v0.67.1 // indirect - github.com/prometheus/procfs v0.19.1 // indirect + github.com/prometheus/common v0.67.2 // indirect + github.com/prometheus/procfs v0.19.2 // indirect github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8 // indirect github.com/quic-go/qpack v0.5.1 // indirect github.com/quic-go/quic-go v0.55.0 // indirect github.com/quic-go/webtransport-go v0.9.0 // indirect - github.com/relvacode/iso8601 v1.7.0 // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/rfjakob/eme v1.1.2 // indirect github.com/rogpeppe/go-internal v1.14.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/samber/lo v1.52.0 // indirect github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect - github.com/shirou/gopsutil/v3 v3.24.5 // indirect - github.com/shoenig/go-m1cpu v0.1.7 // indirect - github.com/sirupsen/logrus v1.9.3 // indirect + github.com/shirou/gopsutil/v4 v4.25.10 // indirect github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect + 
github.com/smarty/assertions v1.16.0 // indirect github.com/sony/gobreaker v1.0.0 // indirect github.com/spacemonkeygo/monkit/v3 v3.0.25-0.20251022131615-eb24eb109368 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect @@ -357,14 +368,18 @@ require ( github.com/stretchr/objx v0.5.2 // indirect github.com/supranational/blst v0.3.16 // indirect github.com/swaggo/files/v2 v2.0.0 // indirect - github.com/t3rm1n4l/go-mega v0.0.0-20250926104142-ccb8d3498e6c // indirect + github.com/t3rm1n4l/go-mega v0.0.0-20251031123324-a804aaa87491 // indirect github.com/tklauser/go-sysconf v0.3.15 // indirect github.com/tklauser/numcpus v0.10.0 // indirect + github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fasttemplate v1.2.2 // indirect + github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc // indirect github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 // indirect github.com/whyrusleeping/cbor-gen v0.3.2-0.20250409092040-76796969edea // indirect github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect + github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect + github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect github.com/wlynxg/anet v0.0.5 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect @@ -375,8 +390,8 @@ require ( github.com/yusufpapurcu/wmi v1.2.4 // indirect github.com/zeebo/blake3 v0.2.4 // indirect github.com/zeebo/errs v1.4.0 // indirect - go.etcd.io/bbolt v1.4.3 // indirect - go.mongodb.org/mongo-driver v1.17.4 // indirect + github.com/zeebo/xxh3 v1.0.2 // indirect + go.mongodb.org/mongo-driver v1.17.6 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect go.opentelemetry.io/otel v1.38.0 // indirect @@ -387,23 +402,24 @@ require ( go.uber.org/fx v1.24.0 // 
indirect go.uber.org/mock v0.6.0 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect - go4.org v0.0.0-20230225012048-214862532bf5 // indirect - golang.org/x/crypto v0.43.0 // indirect + golang.org/x/crypto v0.48.0 // indirect golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect - golang.org/x/mod v0.29.0 // indirect - golang.org/x/net v0.46.0 // indirect - golang.org/x/oauth2 v0.32.0 // indirect - golang.org/x/sync v0.17.0 // indirect - golang.org/x/sys v0.37.0 // indirect - golang.org/x/telemetry v0.0.0-20251022145735-5be28d707443 // indirect - golang.org/x/term v0.36.0 // indirect + golang.org/x/mod v0.32.0 // indirect + golang.org/x/net v0.51.0 // indirect + golang.org/x/oauth2 v0.33.0 // indirect + golang.org/x/sync v0.19.0 // indirect + golang.org/x/sys v0.41.0 // indirect + golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2 // indirect + golang.org/x/term v0.40.0 // indirect golang.org/x/time v0.14.0 // indirect - golang.org/x/tools v0.38.0 // indirect + golang.org/x/tools v0.41.0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect - google.golang.org/api v0.253.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 // indirect + gonum.org/v1/gonum v0.16.0 // indirect + google.golang.org/api v0.255.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 // indirect google.golang.org/grpc v1.76.0 // indirect google.golang.org/protobuf v1.36.10 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools/v3 v3.5.2 // indirect @@ -416,7 +432,7 @@ require ( moul.io/http2curl v1.0.0 // indirect nhooyr.io/websocket v1.8.10 // indirect rsc.io/tmplfunc v0.0.3 // indirect - storj.io/common v0.0.0-20251022143549-19bf6a9f274a // indirect + storj.io/common v0.0.0-20251107171817-6221ae45072c // indirect storj.io/drpc v0.0.35-0.20250513201419-f7819ea69b55 // indirect 
storj.io/eventkit v0.0.0-20250410172343-61f26d3de156 // indirect storj.io/infectious v0.0.2 // indirect @@ -425,3 +441,5 @@ require ( ) replace github.com/filecoin-project/lassie => github.com/parkan/lassie v0.0.0-20251028120409-065e9fd563ad + +replace github.com/rclone/rclone => github.com/parkan/rclone v0.0.0-20260317152005-3777b647816d diff --git a/go.sum b/go.sum index 6b523a98e..baac06e92 100644 --- a/go.sum +++ b/go.sum @@ -48,8 +48,8 @@ dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 h1:5YTBM8QDVIBN3sxBil89WfdAAqDZbyJTgh688DSxX5w= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0 h1:KpMC6LFL7mqpExyMC9jVOYRiVhLmamjeZfRsUpB7l4s= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0/go.mod h1:J7MUC/wtRpfGVbQ5sIItY5/FuVWmvzlY21WAOfQnq/I= github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY= @@ -64,18 +64,24 @@ github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.5.3 h1:sxgSqOB9CDToiaVFp github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.5.3/go.mod h1:XdED8i399lEVblYHTZM8eXaP07gv4Z58IL6ueMlVlrg= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod 
h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8= -github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= +github.com/Azure/go-ntlmssp v0.0.2-0.20251110135918-10b7b7e7cd26 h1:gy/jrlpp8EfSyA73a51fofoSfhp5rPNQAUvDr4Dm91c= +github.com/Azure/go-ntlmssp v0.0.2-0.20251110135918-10b7b7e7cd26/go.mod h1:NYqdhxd/8aAct/s4qSYZEerdPuH1liG2/X9DiVTbhpk= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= -github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 h1:XkkQbfMyuH2jTSjQjSoihryI8GINRcs4xp8lNawg0FI= -github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk= +github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs= +github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/Files-com/files-sdk-go/v3 v3.2.258 h1:9wfRblRg0qc7SIMD8AaE+pMWp1KvR6eFwvMVTwDVIG4= -github.com/Files-com/files-sdk-go/v3 v3.2.258/go.mod h1:wGqkOzRu/ClJibvDgcfuJNAqI2nLhe8g91tPlDKRCdE= +github.com/FilenCloudDienste/filen-sdk-go v0.0.37 h1:W8S9TrAyZ4//3PXsU6+Bi+fe/6uIL986GyS7PVzIDL4= +github.com/FilenCloudDienste/filen-sdk-go v0.0.37/go.mod 
h1:0cBhKXQg49XbKZZfk5TCDa3sVLP+xMxZTWL+7KY0XR0= +github.com/Files-com/files-sdk-go/v3 v3.2.264 h1:lMHTplAYI9FtmCo/QOcpRxmPA5REVAct1r2riQmDQKw= +github.com/Files-com/files-sdk-go/v3 v3.2.264/go.mod h1:wGqkOzRu/ClJibvDgcfuJNAqI2nLhe8g91tPlDKRCdE= +github.com/IBM/go-sdk-core/v5 v5.18.5 h1:g0JRl3sYXJczB/yuDlrN6x22LJ6jIxhp0Sa4ARNW60c= +github.com/IBM/go-sdk-core/v5 v5.18.5/go.mod h1:KonTFRR+8ZSgw5cxBSYo6E4WZoY1+7n1kfHM82VcjFU= +github.com/Jorropo/jsync v1.0.1 h1:6HgRolFZnsdfzRUj+ImB9og1JYOxQoReSywkHOGSaUU= +github.com/Jorropo/jsync v1.0.1/go.mod h1:jCOZj3vrBCri3bSU3ErUYvevKlnbssrXeCivybS5ABQ= github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc= github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9IlZinHa+HVffy+NmVRoKr+wHN8fpLE= @@ -105,64 +111,68 @@ github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDO github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI= github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI= +github.com/a1ex3/zstd-seekable-format-go/pkg v0.10.0 h1:iLDOF0rdGTrol/q8OfPIIs5kLD8XvA2q75o6Uq/tgak= +github.com/a1ex3/zstd-seekable-format-go/pkg v0.10.0/go.mod h1:DrEWcQJjz7t5iF2duaiyhg4jyoF0kxOD6LtECNGkZ/Q= github.com/aalpar/deheap v0.0.0-20210914013432-0cc84d79dec3 h1:hhdWprfSpFbN7lz3W1gM40vOgvSh1WCSMxYD6gGB4Hs= github.com/aalpar/deheap v0.0.0-20210914013432-0cc84d79dec3/go.mod h1:XaUnRxSCYgL3kkgX0QHIV0D+znljPIDImxlv2kbGv0Y= github.com/abbot/go-http-auth v0.4.0 h1:QjmvZ5gSC7jm3Zg54DqWE/T5m1t2AfDu6QlXJT0EVT0= github.com/abbot/go-http-auth v0.4.0/go.mod h1:Cz6ARTIzApMJDzh5bRMSUou6UMSp0IEXg9km/ci7TJM= github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0= 
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= +github.com/anchore/go-lzo v0.1.0 h1:NgAacnzqPeGH49Ky19QKLBZEuFRqtTG9cdaucc3Vncs= +github.com/anchore/go-lzo v0.1.0/go.mod h1:3kLx0bve2oN1iDwgM1U5zGku1Tfbdb0No5qp1eL1fIk= github.com/andybalholm/cascadia v1.3.3 h1:AG2YHrzJIm4BZ19iwJ/DAua6Btl3IwJX+VI4kktS1LM= github.com/andybalholm/cascadia v1.3.3/go.mod h1:xNd9bqTn98Ln4DwST8/nG+H0yuB8Hmgu1YHNnWw0GeA= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc h1:LoL75er+LKDHDUfU5tRvFwxH0LjPpZN8OoG8Ll+liGU= github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc/go.mod h1:w648aMHEgFYS6xb0KVMMtZ2uMeemhiKCuD2vj6gY52A= github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= -github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0= github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY= -github.com/aws/aws-sdk-go-v2 v1.39.4 h1:qTsQKcdQPHnfGYBBs+Btl8QwxJeoWcOcPcixK90mRhg= -github.com/aws/aws-sdk-go-v2 v1.39.4/go.mod h1:yWSxrnioGUZ4WVv9TgMrNUeLV3PFESn/v+6T/Su8gnM= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2 h1:t9yYsydLYNBk9cJ73rgPhPWqOh/52fcWDQB5b1JsKSY= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2/go.mod h1:IusfVNTmiSN3t4rhxWFaBAqn+mcNdwKtPcV16eYdgko= -github.com/aws/aws-sdk-go-v2/config v1.31.15 h1:gE3M4xuNXfC/9bG4hyowGm/35uQTi7bUKeYs5e/6uvU= -github.com/aws/aws-sdk-go-v2/config v1.31.15/go.mod h1:HvnvGJoE2I95KAIW8kkWVPJ4XhdrlvwJpV6pEzFQa8o= 
-github.com/aws/aws-sdk-go-v2/credentials v1.18.19 h1:Jc1zzwkSY1QbkEcLujwqRTXOdvW8ppND3jRBb/VhBQc= -github.com/aws/aws-sdk-go-v2/credentials v1.18.19/go.mod h1:DIfQ9fAk5H0pGtnqfqkbSIzky82qYnGvh06ASQXXg6A= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.11 h1:X7X4YKb+c0rkI6d4uJ5tEMxXgCZ+jZ/D6mvkno8c8Uw= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.11/go.mod h1:EqM6vPZQsZHYvC4Cai35UDg/f5NCEU+vp0WfbVqVcZc= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.15 h1:OsZ2Sk84YUPJfi6BemhyMQyuR8/5tWu37WBMVUl8lJk= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.15/go.mod h1:CYZDjBMY+MyT+U+QmXw81GBiq+lhgM97kIMdDAJk+hg= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.11 h1:7AANQZkF3ihM8fbdftpjhken0TP9sBzFbV/Ze/Y4HXA= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.11/go.mod h1:NTF4QCGkm6fzVwncpkFQqoquQyOolcyXfbpC98urj+c= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.11 h1:ShdtWUZT37LCAA4Mw2kJAJtzaszfSHFb5n25sdcv4YE= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.11/go.mod h1:7bUb2sSr2MZ3M/N+VyETLTQtInemHXb/Fl3s8CLzm0Y= +github.com/aws/aws-sdk-go-v2 v1.39.6 h1:2JrPCVgWJm7bm83BDwY5z8ietmeJUbh3O2ACnn+Xsqk= +github.com/aws/aws-sdk-go-v2 v1.39.6/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.3 h1:DHctwEM8P8iTXFxC/QK0MRjwEpWQeM9yzidCRjldUz0= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.3/go.mod h1:xdCzcZEtnSTKVDOmUZs4l/j3pSV6rpo1WXl5ugNsL8Y= +github.com/aws/aws-sdk-go-v2/config v1.31.17 h1:QFl8lL6RgakNK86vusim14P2k8BFSxjvUkcWLDjgz9Y= +github.com/aws/aws-sdk-go-v2/config v1.31.17/go.mod h1:V8P7ILjp/Uef/aX8TjGk6OHZN6IKPM5YW6S78QnRD5c= +github.com/aws/aws-sdk-go-v2/credentials v1.18.21 h1:56HGpsgnmD+2/KpG0ikvvR8+3v3COCwaF4r+oWwOeNA= +github.com/aws/aws-sdk-go-v2/credentials v1.18.21/go.mod h1:3YELwedmQbw7cXNaII2Wywd+YY58AmLPwX4LzARgmmA= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13 h1:T1brd5dR3/fzNFAQch/iBKeX07/ffu/cLu+q+RuzEWk= 
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13/go.mod h1:Peg/GBAQ6JDt+RoBf4meB1wylmAipb7Kg2ZFakZTlwk= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.4 h1:2fjfz3/G9BRvIKuNZ655GwzpklC2kEH0cowZQGO7uBg= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.4/go.mod h1:Ymws824lvMypLFPwyyUXM52SXuGgxpu0+DISLfKvB+c= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13 h1:a+8/MLcWlIxo1lF9xaGt3J/u3yOZx+CdSveSNwjhD40= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13/go.mod h1:oGnKwIYZ4XttyU2JWxFrwvhF6YKiK/9/wmE3v3Iu9K8= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13 h1:HBSI2kDkMdWz4ZM7FjwE7e/pWDEZ+nR95x8Ztet1ooY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13/go.mod h1:YE94ZoDArI7awZqJzBAZ3PDD2zSfuP7w6P2knOzIn8M= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.11 h1:bKgSxk1TW//00PGQqYmrq83c+2myGidEclp+t9pPqVI= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.11/go.mod h1:vrPYCQ6rFHL8jzQA8ppu3gWX18zxjLIDGTeqDxkBmSI= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.2 h1:xtuxji5CS0JknaXoACOunXOYOQzgfTvGAc9s2QdCJA4= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.2/go.mod h1:zxwi0DIR0rcRcgdbl7E2MSOvxDyyXGBlScvBkARFaLQ= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.2 h1:DGFpGybmutVsCuF6vSuLZ25Vh55E3VmsnJmFfjeBx4M= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.2/go.mod h1:hm/wU1HDvXCFEDzOLorQnZZ/CVvPXvWEmHMSmqgQRuA= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.11 h1:GpMf3z2KJa4RnJ0ew3Hac+hRFYLZ9DDjfgXjuW+pB54= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.11/go.mod h1:6MZP3ZI4QQsgUCFTwMZA2V0sEriNQ8k2hmoHF3qjimQ= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.11 
h1:weapBOuuFIBEQ9OX/NVW3tFQCvSutyjZYk/ga5jDLPo= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.11/go.mod h1:3C1gN4FmIVLwYSh8etngUS+f1viY6nLCDVtZmrFbDy0= -github.com/aws/aws-sdk-go-v2/service/s3 v1.88.7 h1:Wer3W0GuaedWT7dv/PiWNZGSQFSTcBY2rZpbiUp5xcA= -github.com/aws/aws-sdk-go-v2/service/s3 v1.88.7/go.mod h1:UHKgcRSx8PVtvsc1Poxb/Co3PD3wL7P+f49P0+cWtuY= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.13 h1:eg/WYAa12vqTphzIdWMzqYRVKKnCboVPRlvaybNCqPA= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.13/go.mod h1:/FDdxWhz1486obGrKKC1HONd7krpk38LBt+dutLcN9k= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 h1:x2Ibm/Af8Fi+BH+Hsn9TXGdT+hKbDd5XOTZxTMxDk7o= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3/go.mod h1:IW1jwyrQgMdhisceG8fQLmQIydcT/jWY21rFhzgaKwo= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.4 h1:NvMjwvv8hpGUILarKw7Z4Q0w1H9anXKsesMxtw++MA4= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.4/go.mod h1:455WPHSwaGj2waRSpQp7TsnpOnBfw8iDfPfbwl7KPJE= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13 h1:kDqdFvMY4AtKoACfzIGD8A0+hbT41KTKF//gq7jITfM= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13/go.mod h1:lmKuogqSU3HzQCwZ9ZtcqOc5XGMqtDK7OIc2+DxiUEg= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.13 h1:zhBJXdhWIFZ1acfDYIhu4+LCzdUS2Vbcum7D01dXlHQ= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.13/go.mod h1:JaaOeCE368qn2Hzi3sEzY6FgAZVCIYcC2nwbro2QCh8= +github.com/aws/aws-sdk-go-v2/service/s3 v1.90.0 h1:ef6gIJR+xv/JQWwpa5FYirzoQctfSJm7tuDe3SZsUf8= +github.com/aws/aws-sdk-go-v2/service/s3 v1.90.0/go.mod h1:+wArOOrcHUevqdto9k1tKOF5++YTe9JEcPSc9Tx2ZSw= github.com/aws/aws-sdk-go-v2/service/sns v1.34.5 h1:xWwv6Ue0EoD9APZNNrgtXaf79yQKyz5TbvXiQLkywWs= github.com/aws/aws-sdk-go-v2/service/sns v1.34.5/go.mod h1:PJtxxMdj747j8DeZENRTTYAz/lx/pADn/U0k7YNNiUY= github.com/aws/aws-sdk-go-v2/service/sqs v1.38.6 
h1:XwpzAaL0nKdSvDS0SRGIQWkqpS8DjcyBRJcatPBFijY= github.com/aws/aws-sdk-go-v2/service/sqs v1.38.6/go.mod h1:Bar4MrRxeqdn6XIh8JGfiXuFRmyrrsZNTJotxEJmWW0= -github.com/aws/aws-sdk-go-v2/service/sso v1.29.8 h1:M5nimZmugcZUO9wG7iVtROxPhiqyZX6ejS1lxlDPbTU= -github.com/aws/aws-sdk-go-v2/service/sso v1.29.8/go.mod h1:mbef/pgKhtKRwrigPPs7SSSKZgytzP8PQ6P6JAAdqyM= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.3 h1:S5GuJZpYxE0lKeMHKn+BRTz6PTFpgThyJ+5mYfux7BM= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.3/go.mod h1:X4OF+BTd7HIb3L+tc4UlWHVrpgwZZIVENU15pRDVTI0= -github.com/aws/aws-sdk-go-v2/service/sts v1.38.9 h1:Ekml5vGg6sHSZLZJQJagefnVe6PmqC2oiRkBq4F7fU0= -github.com/aws/aws-sdk-go-v2/service/sts v1.38.9/go.mod h1:/e15V+o1zFHWdH3u7lpI3rVBcxszktIKuHKCY2/py+k= -github.com/aws/smithy-go v1.23.1 h1:sLvcH6dfAFwGkHLZ7dGiYF7aK6mg4CgKA/iDKjLDt9M= -github.com/aws/smithy-go v1.23.1/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.1 h1:0JPwLz1J+5lEOfy/g0SURC9cxhbQ1lIMHMa+AHZSzz0= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.1/go.mod h1:fKvyjJcz63iL/ftA6RaM8sRCtN4r4zl4tjL3qw5ec7k= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.5 h1:OWs0/j2UYR5LOGi88sD5/lhN6TDLG6SfA7CqsQO9zF0= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.5/go.mod h1:klO+ejMvYsB4QATfEOIXk8WAEwN4N0aBfJpvC+5SZBo= +github.com/aws/aws-sdk-go-v2/service/sts v1.39.1 h1:mLlUgHn02ue8whiR4BmxxGJLR2gwU6s6ZzJ5wDamBUs= +github.com/aws/aws-sdk-go-v2/service/sts v1.39.1/go.mod h1:E19xDjpzPZC7LS2knI9E6BaRFDK43Eul7vd6rSq2HWk= +github.com/aws/smithy-go v1.23.2 h1:Crv0eatJUQhaManss33hS5r40CG3ZFH+21XSkqMrIUM= +github.com/aws/smithy-go v1.23.2/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= +github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= +github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= github.com/bcicen/jstream v1.0.1 h1:BXY7Cu4rdmc0rhyTVyT3UkxAiX3bnLpKLas9btbH5ck= 
github.com/bcicen/jstream v1.0.1/go.mod h1:9ielPxqFry7Y4Tg3j4BfjPocfJ3TbsRtXOAYXYmRuAQ= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= @@ -174,6 +184,8 @@ github.com/bep/debounce v1.2.1 h1:v67fRdBA9UQu2NhLFXrSg0Brw7CexQekrBwDMM8bzeY= github.com/bep/debounce v1.2.1/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0= github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/boombuler/barcode v1.1.0 h1:ChaYjBR63fr4LFyGn8E8nt7dBSt3MiU3zMOZqFvVkHo= +github.com/boombuler/barcode v1.1.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/bradenaw/juniper v0.15.3 h1:RHIAMEDTpvmzV1wg1jMAHGOoI2oJUSPx3lxRldXnFGo= github.com/bradenaw/juniper v0.15.3/go.mod h1:UX4FX57kVSaDp4TPqvSjkAAewmRFAfXf27BOs5z9dq8= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= @@ -184,6 +196,8 @@ github.com/brianvoe/gofakeit/v6 v6.23.2/go.mod h1:Ow6qC71xtwm79anlwKRlWZW6zVq9D2 github.com/buengese/sgzip v0.1.1 h1:ry+T8l1mlmiWEsDrH/YHZnCVWD2S3im1KLsyO+8ZmTU= github.com/buengese/sgzip v0.1.1/go.mod h1:i5ZiXGF3fhV7gL1xaRRL1nDnmpNj0X061FQzOS8VMas= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/calebcase/tmpfile v1.0.3 h1:BZrOWZ79gJqQ3XbAQlihYZf/YCV0H4KPIdM5K5oMpJo= github.com/calebcase/tmpfile v1.0.3/go.mod h1:UAUc01aHeC+pudPagY/lWvt2qS9ZO5Zzof6/tIUzqeI= github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= @@ -201,8 +215,14 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR github.com/chzyer/readline 
v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= -github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= +github.com/clipperhouse/stringish v0.1.1 h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfatpWHKCs= +github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA= +github.com/clipperhouse/uax29/v2 v2.3.0 h1:SNdx9DVUqMoBuBoW3iLOj4FQv3dN5mDtuqwuhIGpJy4= +github.com/clipperhouse/uax29/v2 v2.3.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g= +github.com/cloudflare/circl v1.6.3 h1:9GPOhQGF9MCYUeXyMYlqTR6a5gTrgR/fBLXvUgtVcg8= +github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4= +github.com/cloudinary/cloudinary-go/v2 v2.13.0 h1:ugiQwb7DwpWQnete2AZkTh94MonZKmxD7hDGy1qTzDs= +github.com/cloudinary/cloudinary-go/v2 v2.13.0/go.mod h1:ireC4gqVetsjVhYlwjUJwKTbZuWjEIynbR9zQTlqsvo= github.com/cloudsoda/go-smb2 v0.0.0-20250228001242-d4c70e6251cc h1:t8YjNUCt1DimB4HCIXBztwWMhgxr5yG5/YaRl9Afdfg= github.com/cloudsoda/go-smb2 v0.0.0-20250228001242-d4c70e6251cc/go.mod h1:CgWpFCFWzzEA5hVkhAc6DZZzGd3czx+BblvOzjmg6KA= github.com/cloudsoda/sddl v0.0.0-20250224235906-926454e91efc h1:0xCWmFKBmarCqqqLeM7jFBSw/Or81UEElFqO8MY+GDs= @@ -247,6 +267,8 @@ github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c/go.mod h1:geZJ github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI= github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creasty/defaults v1.8.0 
h1:z27FJxCAa0JKt3utc0sCImAEb+spPucmKoOdLHvHYKk= +github.com/creasty/defaults v1.8.0/go.mod h1:iGzKe6pbEHnpMPtfDXZEr0NVxWnPTjb1bbDy08fPzYM= github.com/cronokirby/saferith v0.33.0 h1:TgoQlfsD4LIwx71+ChfRcIpjkw+RPOapDEVxa+LhwLo= github.com/cronokirby/saferith v0.33.0/go.mod h1:QKJhjoqUtBsXCAVEjw38mFqoi7DebT7kthcD7UzbnoA= github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= @@ -267,6 +289,8 @@ github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= +github.com/diskfs/go-diskfs v1.7.0 h1:vonWmt5CMowXwUc79jWyGrf2DIMeoOjkLlMnQYGVOs8= +github.com/diskfs/go-diskfs v1.7.0/go.mod h1:LhQyXqOugWFRahYUSw47NyZJPezFzB9UELwhpszLP/k= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/docker/docker v28.5.1+incompatible h1:Bm8DchhSD2J6PsFzxC35TZo4TLGR2PdW/E69rU45NhM= @@ -275,6 +299,8 @@ github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pM github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dromara/dongle v1.0.1 h1:si/7UP/EXxnFVZok1cNos70GiMGxInAYMilHQFP5dJs= +github.com/dromara/dongle v1.0.1/go.mod h1:ebFhTaDgxaDIKppycENTWlBsxz8mWCPWOLnsEgDpMv4= github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5 h1:FT+t0UEDykcor4y3dMVKXIiWJETBpRgERYTGlmMd7HU= github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5/go.mod 
h1:rSS3kM9XMzSQ6pw91Qgd6yB5jdt70N4OdtrAf74As5M= github.com/dsnet/try v0.0.3 h1:ptR59SsrcFUYbT/FhAbKTV6iLkeD6O18qfIWRml2fqI= @@ -282,6 +308,8 @@ github.com/dsnet/try v0.0.3/go.mod h1:WBM8tRpUmnXXhY1U6/S8dt6UWdHTQ7y8A5YSkRCkq4 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/ebitengine/purego v0.9.1 h1:a/k2f2HQU3Pi399RPW1MOaZyhKJL9w/xFpKAg4q1s0A= +github.com/ebitengine/purego v0.9.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= github.com/elazarl/goproxy v0.0.0-20221015165544-a0805db90819 h1:RIB4cRk+lBqKK3Oy0r2gRX4ui7tuhiZq2SuTtTCi0/0= github.com/elazarl/goproxy v0.0.0-20221015165544-a0805db90819/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8= @@ -375,8 +403,8 @@ github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU= github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= -github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0= -github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s= +github.com/gabriel-vasile/mimetype v1.4.11 h1:AQvxbp830wPhHTqc1u7nzoLT+ZFxGY7emj5DR5DYFik= +github.com/gabriel-vasile/mimetype v1.4.11/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s= github.com/gammazero/chanqueue v1.1.1 h1:n9Y+zbBxw2f7uUE9wpgs0rOSkP/I/yhDLiNuhyVjojQ= github.com/gammazero/chanqueue v1.1.1/go.mod h1:fMwpwEiuUgpab0sH4VHiVcEoji1pSi+EIzeG4TPeKPc= github.com/gammazero/deque v1.1.0 
h1:OyiyReBbnEG2PP0Bnv1AASLIYvyKqIFN5xfl1t8oGLo= @@ -394,13 +422,15 @@ github.com/glebarez/sqlite v1.8.0 h1:02X12E2I/4C1n+v90yTqrjRa8yuo7c3KeHI3FRznCvc github.com/glebarez/sqlite v1.8.0/go.mod h1:bpET16h1za2KOOMb8+jCp6UBP/iahDpfPQqSaYLTLx8= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= -github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= -github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= +github.com/go-chi/chi/v5 v5.2.5 h1:Eg4myHZBjyvJmAFjFvWgrqDTXFyOzjj7YIm3L3mu6Ug= +github.com/go-chi/chi/v5 v5.2.5/go.mod h1:X7Gx4mteadT3eDOMTsXzmI4/rwUpOwBHLpAfupzFJP0= github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348 h1:JnrjqG5iR07/8k7NqrLNilRsl3s1EPRQEGvbPyOce68= github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348/go.mod h1:Czxo/d1g948LtrALAZdL04TL/HnkopquAjxYUuI02bo= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM= +github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -419,8 +449,8 @@ github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9Qy github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= 
github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M= -github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= +github.com/go-openapi/errors v0.22.4 h1:oi2K9mHTOb5DPW2Zjdzs/NIvwi2N3fARKaTJLdNabaM= +github.com/go-openapi/errors v0.22.4/go.mod h1:z9S8ASTUqx7+CP1Q8dD8ewGH/1JWFFLX/2PmAYNQLgk= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= @@ -442,16 +472,26 @@ github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6 github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= -github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= -github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= +github.com/go-openapi/strfmt v0.25.0 h1:7R0RX7mbKLa9EYCTHRcCuIPcaqlyQiWNPTXwClK0saQ= +github.com/go-openapi/strfmt v0.25.0/go.mod h1:nNXct7OzbwrMY9+5tLX4I21pzcmE6ccMGXl3jFdPfn8= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/swag v0.23.0 
h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls= +github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.28.0 h1:Q7ibns33JjyW48gHkuFT91qX48KG0ktULL6FgHdG688= +github.com/go-playground/validator/v10 v10.28.0/go.mod h1:GoI6I1SjPBh9p7ykNE/yj3fFYbyDOpwMn5KXd+m2hUU= github.com/go-resty/resty/v2 v2.16.5 h1:hBKqmWrr7uRc3euHVqmh1HTHcKn99Smr7o5spptdhTM= github.com/go-resty/resty/v2 v2.16.5/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA= github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo= @@ -460,6 +500,8 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/go-viper/mapstructure/v2 v2.4.0 
h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= @@ -532,6 +574,8 @@ github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -546,6 +590,8 @@ github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+u github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod 
h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -564,8 +610,8 @@ github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4= -github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= +github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ= +github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -576,6 +622,8 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORR github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= +github.com/gorilla/schema v1.4.1 h1:jUg5hUjCSDZpNGLuXQOgIWGdlgrIdYvgQ0wZtdK1M3E= +github.com/gorilla/schema v1.4.1/go.mod h1:Dg5SSm5PV60mhF2NFaTV1xuYYj8tV8NOPRo4FggUMnM= github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= 
github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI= @@ -602,6 +650,8 @@ github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpx github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48= @@ -615,10 +665,6 @@ github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iP github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= -github.com/henrybear327/Proton-API-Bridge v1.0.0 h1:gjKAaWfKu++77WsZTHg6FUyPC5W0LTKWQciUm8PMZb0= -github.com/henrybear327/Proton-API-Bridge v1.0.0/go.mod h1:gunH16hf6U74W2b9CGDaWRadiLICsoJ6KRkSt53zLts= -github.com/henrybear327/go-proton-api v1.0.0 h1:zYi/IbjLwFAW7ltCeqXneUGJey0TN//Xo851a/BgLXw= -github.com/henrybear327/go-proton-api v1.0.0/go.mod h1:w63MZuzufKcIZ93pwRgiOtxMXYafI8H74D77AxytOBc= github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 h1:X4egAf/gcS1zATw6wn4Ej8vjuVGxeHdan+bRb2ebyv4= github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= github.com/holiman/bloomfilter/v2 v2.0.3 
h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= @@ -631,6 +677,8 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/indexsupply/shovel v0.1.9-0.20260111041930-aea8d42c335c h1:w/28kFk3BSpsdRbTyAWJAnCt224qASF+FK92nlq/OBY= github.com/indexsupply/shovel v0.1.9-0.20260111041930-aea8d42c335c/go.mod h1:2fbvQP5CghUhrRn/XEQtwHXt51oaefJc0xqbAH+TTBE= +github.com/internxt/rclone-adapter v0.0.0-20260220172730-613f4cc8b8fd h1:dSIuz2mpJAPQfhHYtG57D0qwSkgC/vQ69gHfeyQ4kxA= +github.com/internxt/rclone-adapter v0.0.0-20260220172730-613f4cc8b8fd/go.mod h1:vdPya4AIcDjvng4ViaAzqjegJf0VHYpYHQguFx5xBp0= github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= github.com/ipfs/boxo v0.35.0 h1:3Mku5arSbAZz0dvb4goXRsQuZkFkPrGr5yYdu0YM1pY= @@ -689,6 +737,8 @@ github.com/ipfs/go-ipfs-posinfo v0.0.1 h1:Esoxj+1JgSjX0+ylc0hUmJCOv6V2vFoZiETLR6 github.com/ipfs/go-ipfs-posinfo v0.0.1/go.mod h1:SwyeVP+jCwiDu0C313l/8jg6ZxM0qqtlt2a0vILTc1A= github.com/ipfs/go-ipfs-pq v0.0.3 h1:YpoHVJB+jzK15mr/xsWC574tyDLkezVrDNeaalQBsTE= github.com/ipfs/go-ipfs-pq v0.0.3/go.mod h1:btNw5hsHBpRcSSgZtiNm/SLj5gYIZ18AKtv3kERkRb4= +github.com/ipfs/go-ipfs-redirects-file v0.1.2 h1:QCK7VtL91FH17KROVVy5KrzDx2hu68QvB2FTWk08ZQk= +github.com/ipfs/go-ipfs-redirects-file v0.1.2/go.mod h1:yIiTlLcDEM/8lS6T3FlCEXZktPPqSOyuY6dEzVqw7Fw= github.com/ipfs/go-ipfs-routing v0.3.0 h1:9W/W3N+g+y4ZDeffSgqhgo7BsBSJwPMcyssET9OWevc= github.com/ipfs/go-ipfs-routing v0.3.0/go.mod h1:dKqtTFIql7e1zYsEuWLyuOU+E0WJWW8JjbTPLParDWo= github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= @@ -791,8 +841,8 @@ github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= 
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= -github.com/jlaffaye/ftp v0.2.0 h1:lXNvW7cBu7R/68bknOX3MrRIIqZ61zELs1P2RAiA3lg= -github.com/jlaffaye/ftp v0.2.0/go.mod h1:is2Ds5qkhceAPy2xD6RLI6hmp/qysSoymZ+Z2uTnspI= +github.com/jlaffaye/ftp v0.2.1-0.20240918233326-1b970516f5d3 h1:ZxO6Qr2GOXPdcW80Mcn3nemvilMPvpWqxrNfK2ZnNNs= +github.com/jlaffaye/ftp v0.2.1-0.20240918233326-1b970516f5d3/go.mod h1:dvLUr/8Fs9a2OBrEnCC5duphbkz/k/mSy5OkXg3PAgI= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= @@ -849,20 +899,34 @@ github.com/labstack/echo/v4 v4.10.2 h1:n1jAhnq/elIFTHr1EYpiYtyKgx4RW9ccVgkqByZaN github.com/labstack/echo/v4 v4.10.2/go.mod h1:OEyqf2//K1DFdE57vw2DRgWY0M7s65IVQO2FzvI4J5k= github.com/labstack/gommon v0.4.0 h1:y7cvthEAEbU0yHOf4axH8ZG2NH8knB9iNSoTO8dyIk8= github.com/labstack/gommon v0.4.0/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM= +github.com/lanrat/extsort v1.4.2 h1:akbLIdo4PhNZtvjpaWnbXtGMmLtnGzXplkzfgl+XTTY= +github.com/lanrat/extsort v1.4.2/go.mod h1:hceP6kxKPKebjN1RVrDBXMXXECbaI41Y94tt6MDazc4= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= +github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= +github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= github.com/libp2p/go-buffer-pool v0.1.0/go.mod 
h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= +github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= +github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= +github.com/libp2p/go-doh-resolver v0.5.0 h1:4h7plVVW+XTS+oUBw2+8KfoM1jF6w8XmO7+skhePFdE= +github.com/libp2p/go-doh-resolver v0.5.0/go.mod h1:aPDxfiD2hNURgd13+hfo29z9IC22fv30ee5iM31RzxU= github.com/libp2p/go-flow-metrics v0.3.0 h1:q31zcHUvHnwDO0SHaukewPYgwOBSxtt830uJtUx6784= github.com/libp2p/go-flow-metrics v0.3.0/go.mod h1:nuhlreIwEguM1IvHAew3ij7A8BMlyHQJ279ao24eZZo= github.com/libp2p/go-libp2p v0.44.0 h1:5Gtt8OrF8yiXmH+Mx4+/iBeFRMK1TY3a8OrEBDEqAvs= github.com/libp2p/go-libp2p v0.44.0/go.mod h1:NovCojezAt4dnDd4fH048K7PKEqH0UFYYqJRjIIu8zc= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= +github.com/libp2p/go-libp2p-kad-dht v0.34.0 h1:yvJ/Vrt36GVjsqPxiGcuuwOloKuZLV9Aa7awIKyNXy0= +github.com/libp2p/go-libp2p-kad-dht v0.34.0/go.mod h1:JNbkES4W5tajS6uYivw6MPs0842cPHAwhgaPw8sQG4o= +github.com/libp2p/go-libp2p-kbucket v0.7.0 h1:vYDvRjkyJPeWunQXqcW2Z6E93Ywx7fX0jgzb/dGOKCs= +github.com/libp2p/go-libp2p-kbucket v0.7.0/go.mod h1:blOINGIj1yiPYlVEX0Rj9QwEkmVnz3EP8LK1dRKBC6g= github.com/libp2p/go-libp2p-record v0.3.1 h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOUZ8kUl+Fg= github.com/libp2p/go-libp2p-record v0.3.1/go.mod h1:T8itUkLcWQLCYMqtX7Th6r7SexyUJpIyPgks757td/E= +github.com/libp2p/go-libp2p-routing-helpers v0.7.5 h1:HdwZj9NKovMx0vqq6YNPTh6aaNzey5zHD7HeLJtq6fI= +github.com/libp2p/go-libp2p-routing-helpers v0.7.5/go.mod h1:3YaxrwP0OBPDD7my3D0KxfR89FlcX/IEbxDEDfAmj98= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= github.com/libp2p/go-msgio v0.3.0 
h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= @@ -882,8 +946,9 @@ github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.9.1 h1:LbtsOm5WAswyWbvTEOqhypdPeZzHavpZx96/n553mR8= +github.com/mailru/easyjson v0.9.1/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= github.com/marcopolo/simnet v0.0.1 h1:rSMslhPz6q9IvJeFWDoMGxMIrlsbXau3NkuIXHGJxfg= github.com/marcopolo/simnet v0.0.1/go.mod h1:WDaQkgLAjqDUEBAOXz22+1j6wXKfGlC5sD5XWt3ddOs= github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= @@ -896,8 +961,9 @@ github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stg github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= -github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byFGLdw= +github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs= github.com/mattn/go-shellwords v1.0.12 
h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs= @@ -982,8 +1048,8 @@ github.com/multiformats/go-varint v0.1.0 h1:i2wqFp4sdl3IcIxfAonHQV9qU5OsZ4Ts9IOo github.com/multiformats/go-varint v0.1.0/go.mod h1:5KVAVXegtfmNQQm/lCY+ATvDzvJJhSkUlGQV9wgObdI= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/ncw/swift/v2 v2.0.4 h1:hHWVFxn5/YaTWAASmn4qyq2p6OyP/Hm3vMLzkjEqR7w= -github.com/ncw/swift/v2 v2.0.4/go.mod h1:cbAO76/ZwcFrFlHdXPjaqWZ9R7Hdar7HpjRXBfbjigk= +github.com/ncw/swift/v2 v2.0.5 h1:9o5Gsd7bInAFEqsGPcaUdsboMbqf8lnNtxqWKFT9iz8= +github.com/ncw/swift/v2 v2.0.5/go.mod h1:cbAO76/ZwcFrFlHdXPjaqWZ9R7Hdar7HpjRXBfbjigk= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= @@ -1007,14 +1073,16 @@ github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFSt github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= -github.com/oracle/oci-go-sdk/v65 v65.102.1 h1:zLNLz5dVzZxOf5DK/f3WGZUjwrQ9m27fd4abOFwQRCQ= -github.com/oracle/oci-go-sdk/v65 v65.102.1/go.mod h1:oB8jFGVc/7/zJ+DbleE8MzGHjhs2ioCz5stRTdZdIcY= +github.com/oracle/oci-go-sdk/v65 v65.104.0 
h1:l9awEvzWvxmYhy/97A0hZ87pa7BncYXmcO/S8+rvgK0= +github.com/oracle/oci-go-sdk/v65 v65.104.0/go.mod h1:oB8jFGVc/7/zJ+DbleE8MzGHjhs2ioCz5stRTdZdIcY= github.com/orlangure/gnomock v0.32.0 h1:96KCsqbDUaKz2nKkGqC0nIlJeCRaSfEpSi4CljBp8zk= github.com/orlangure/gnomock v0.32.0/go.mod h1:CpMbwyCmPFpeLrsA5LIUcMrGm7LOf9+2JE+taayrUPc= github.com/panjf2000/ants/v2 v2.11.3 h1:AfI0ngBoXJmYOpDh9m516vjqoUu2sLrIVgppI9TZVpg= github.com/panjf2000/ants/v2 v2.11.3/go.mod h1:8u92CYMUc6gyvTIw8Ru7Mt7+/ESnJahz5EVtqfrilek= github.com/parkan/lassie v0.0.0-20251028120409-065e9fd563ad h1:q56OIYelfaF5vvhyjMR9CSIK3xPoi/RefZTuILbAEcI= github.com/parkan/lassie v0.0.0-20251028120409-065e9fd563ad/go.mod h1:P+HSZ4olI9imIXijFMLATQxfxu6erhWTtihKarK5tbs= +github.com/parkan/rclone v0.0.0-20260317152005-3777b647816d h1:/PsMYjkePqFFCS8itIvzEMzC2MhTSssz9x+X8LJd6rc= +github.com/parkan/rclone v0.0.0-20260317152005-3777b647816d/go.mod h1:0yvNwdX/c+ZOHm275UV7IpknJiWe/E/5U0uChfkXMGs= github.com/parnurzeal/gorequest v0.2.16 h1:T/5x+/4BT+nj+3eSknXmCTnEVGSzFzPGdpqmUVVZXHQ= github.com/parnurzeal/gorequest v0.2.16/go.mod h1:3Kh2QUMJoqw3icWAecsyzkpY7UzRfDhbRdTjtNwNiUE= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= @@ -1026,6 +1094,10 @@ github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14 h1:XeOYlK9W1uC github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14/go.mod h1:jVblp62SafmidSkvWrXyxAme3gaTfEtWwRPGz5cpvHg= github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 h1:1/WtZae0yGtPq+TI6+Tv1WTxkukpXeMlviSxvL7SRgk= github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9/go.mod h1:x3N5drFsm2uilKKuuYo6LdyD8vZAW55sH/9w+pbo1sw= +github.com/peterh/liner v1.2.2 h1:aJ4AOodmL+JxOZZEL2u9iJf8omNRpqHc/EbrK+3mAXw= +github.com/peterh/liner v1.2.2/go.mod h1:xFwJyiKIXJZUKItq5dGHZSTBRAuG/CpeNpWLyiNRNwI= +github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= +github.com/pierrec/lz4/v4 v4.1.22/go.mod 
h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o= @@ -1092,6 +1164,8 @@ github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4 github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/pquerna/otp v1.5.0 h1:NMMR+WrmaqXU4EzdGJEE1aUUI0AMRzsp96fFFWNPwxs= +github.com/pquerna/otp v1.5.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= @@ -1100,11 +1174,11 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.67.1 h1:OTSON1P4DNxzTg4hmKCc37o4ZAZDv0cfXLkOt0oEowI= -github.com/prometheus/common v0.67.1/go.mod h1:RpmT9v35q2Y+lsieQsdOh5sXZ6ajUGC8NjZAmr8vb0Q= +github.com/prometheus/common v0.67.2 h1:PcBAckGFTIHt2+L3I33uNRTlKTplNzFctXcWhPyAEN8= +github.com/prometheus/common v0.67.2/go.mod h1:63W3KZb1JOKgcjlIr64WW/LvFGAqKPj0atm+knVGEko= github.com/prometheus/procfs 
v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.19.1 h1:QVtROpTkphuXuNlnCv3m1ut3JytkXHtQ3xvck/YmzMM= -github.com/prometheus/procfs v0.19.1/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw= +github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws= +github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw= github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8 h1:Y258uzXU/potCYnQd1r6wlAnoMB68BiCkCcCnKx1SH8= github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8/go.mod h1:bSJjRokAHHOhA+XFxplld8w2R/dXLH7Z3BZ532vhFwU= github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= @@ -1113,8 +1187,10 @@ github.com/quic-go/quic-go v0.55.0 h1:zccPQIqYCXDt5NmcEabyYvOnomjs8Tlwl7tISjJh9M github.com/quic-go/quic-go v0.55.0/go.mod h1:DR51ilwU1uE164KuWXhinFcKWGlEjzys2l8zUl5Ss1U= github.com/quic-go/webtransport-go v0.9.0 h1:jgys+7/wm6JarGDrW+lD/r9BGqBAmqY/ssklE09bA70= github.com/quic-go/webtransport-go v0.9.0/go.mod h1:4FUYIiUc75XSsF6HShcLeXXYZJ9AGwo/xh3L8M/P1ao= -github.com/rclone/rclone v1.68.0 h1:tsPAAlf7Mu2WPs+hA12v34ojCcgwcOlHas9b8Ju6HYw= -github.com/rclone/rclone v1.68.0/go.mod h1:T8XKOt/2Fb9INROUtFH9eF9q9o9rI1W2qTrW2bw2cYU= +github.com/rclone/Proton-API-Bridge v1.0.1-0.20260127174007-77f974840d11 h1:4MI2alxM/Ye2gIRBlYf28JGWTipZ4Zz7yAziPKrttjs= +github.com/rclone/Proton-API-Bridge v1.0.1-0.20260127174007-77f974840d11/go.mod h1:3HLX7dwZgvB7nt+Yl/xdzVPcargQ1yBmJEUg3n+jMKM= +github.com/rclone/go-proton-api v1.0.1-0.20260127173028-eb465cac3b18 h1:Lc+d3ISfQaMJKWZOE7z4ZSY4RVmdzbn1B0IM8xN18qM= +github.com/rclone/go-proton-api v1.0.1-0.20260127173028-eb465cac3b18/go.mod h1:LB2kCEaZMzNn3ocdz+qYfxXmuLxxN0ka62KJd2x53Bc= github.com/relvacode/iso8601 v1.7.0 h1:BXy+V60stMP6cpswc+a93Mq3e65PfXCgDFfhvNNGrdo= github.com/relvacode/iso8601 v1.7.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I= 
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= @@ -1122,8 +1198,6 @@ github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94 github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rfjakob/eme v1.1.2 h1:SxziR8msSOElPayZNFfQw4Tjx/Sbaeeh3eRvrHVMUs4= github.com/rfjakob/eme v1.1.2/go.mod h1:cVvpasglm/G3ngEfcfT/Wt0GwhkuO32pf/poW6Nyk1k= -github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= -github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rjNemo/underscore v0.5.0 h1:Pa58PfchgZWgCY1eBKjER/lm0repbGrTzq6RRxtnGmg= github.com/rjNemo/underscore v0.5.0/go.mod h1:y3LuKy2UP6zp7yZff5ZGRm1s/s9QvCoCoQZVqAkk3hM= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= @@ -1151,12 +1225,8 @@ github.com/sashabaranov/go-openai v1.14.1/go.mod h1:lj5b/K+zjTSFxVLijLSTDZuP7adO github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1:Bn1aCHHRnjv4Bl16T8rcaFjYSrGrIZvpiGO6P3Q4GpU= github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI= -github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk= -github.com/shoenig/go-m1cpu v0.1.7 h1:C76Yd0ObKR82W4vhfjZiCp0HxcSZ8Nqd84v+HZ0qyI0= -github.com/shoenig/go-m1cpu v0.1.7/go.mod h1:KkDOw6m3ZJQAPHbrzkZki4hnx+pDRR1Lo+ldA56wD5w= -github.com/shoenig/test v1.7.0 h1:eWcHtTXa6QLnBvm0jgEabMRN/uJ4DMV3M8xUGgRkZmk= -github.com/shoenig/test v1.7.0/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI= +github.com/shirou/gopsutil/v4 v4.25.10 h1:at8lk/5T1OgtuCp+AwrDofFRjnvosn0nkN2OLQ6g8tA= 
+github.com/shirou/gopsutil/v4 v4.25.10/go.mod h1:+kSwyC8DRUD9XXEHCAFjK+0nuArFJM0lva+StQAcskM= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= @@ -1184,19 +1254,20 @@ github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af h1:Sp5TG9f7K39yfB+If0vjp97vuT74F72r8hfRpP8jLU0= +github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= +github.com/smarty/assertions v1.16.0 h1:EvHNkdRA4QHMrn75NZSoUQ/mAUXAYWfatfB01yTCzfY= +github.com/smarty/assertions v1.16.0/go.mod h1:duaaFdCS0K9dnoM50iyek/eYINOZ64gbh1Xlf6LG7AI= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= 
-github.com/smartystreets/assertions v1.13.0 h1:Dx1kYM01xsSqKPno3aqLnrwac2LetPvN23diwyr69Qs= -github.com/smartystreets/assertions v1.13.0/go.mod h1:wDmR7qL282YbGsPy6H/yAsesrxfxaaSlJazyFLYVFx8= github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= +github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY= +github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60= github.com/sony/gobreaker v1.0.0 h1:feX5fGGXSl3dYd4aHZItw+FpHLvvoaqkawKjVNiFMNQ= github.com/sony/gobreaker v1.0.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= @@ -1240,8 +1311,8 @@ github.com/swaggo/swag v1.16.1 h1:fTNRhKstPKxcnoKsytm4sahr8FaYzUcT7i1/3nd/fBg= github.com/swaggo/swag v1.16.1/go.mod h1:9/LMvHycG3NFHfR6LwvikHv5iFvmPADQ359cKikGxto= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= -github.com/t3rm1n4l/go-mega v0.0.0-20250926104142-ccb8d3498e6c h1:BLopNCyqewbE8+BtlIp/Juzu8AJGxz0gHdGADnsblVc= -github.com/t3rm1n4l/go-mega v0.0.0-20250926104142-ccb8d3498e6c/go.mod h1:ykucQyiE9Q2qx1wLlEtZkkNn1IURib/2O+Mvd25i1Fo= +github.com/t3rm1n4l/go-mega v0.0.0-20251031123324-a804aaa87491 h1:rrGZv6xYk37hx0tW2sYfgbO0PqStbHqz6Bq6oc9Hurg= +github.com/t3rm1n4l/go-mega v0.0.0-20251031123324-a804aaa87491/go.mod h1:ykucQyiE9Q2qx1wLlEtZkkNn1IURib/2O+Mvd25i1Fo= 
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tklauser/go-sysconf v0.3.15 h1:VE89k0criAymJ/Os65CSn1IXaol+1wrsFHEB8Ol49K4= @@ -1250,6 +1321,12 @@ github.com/tklauser/numcpus v0.10.0 h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfj github.com/tklauser/numcpus v0.10.0/go.mod h1:BiTKazU708GQTYF4mB+cmlpT2Is1gLk7XVuEeem8LsQ= github.com/twmb/murmur3 v1.1.6 h1:mqrRot1BRxm+Yct+vavLMou2/iJt0tNVTTC0QoIjaZg= github.com/twmb/murmur3 v1.1.6/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ= +github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= +github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= +github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb h1:Ywfo8sUltxogBpFuMOFRrrSifO788kAFxmvVw31PtQQ= +github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb/go.mod h1:ikPs9bRWicNw3S7XpJ8sK/smGwU9WcSVU3dy9qahYBM= +github.com/ulikunitz/xz v0.5.15 h1:9DNdB5s+SgV3bQ2ApL10xRc35ck0DuIX/isZvIk+ubY= +github.com/ulikunitz/xz v0.5.15/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/unknwon/goconfig v1.0.0 h1:rS7O+CmUdli1T+oDm7fYj1MwqNWtEJfNj+FqcUHML8U= github.com/unknwon/goconfig v1.0.0/go.mod h1:qu2ZQ/wcC/if2u32263HTVC39PeOQRSmidQk3DuDFQ8= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= @@ -1269,6 +1346,8 @@ github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvS github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/whyrusleeping/base32 
v0.0.0-20170828182744-c30ac30633cc h1:BCPnHtcboadS0DvysUuJXZ4lWVv5Bh5i7+tbIyi+ck4= +github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM= github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 h1:5HZfQkwe0mIfyDmc1Em5GqlNRzcdtlv4HTNmdpt7XH0= github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11/go.mod h1:Wlo/SzPmxVp6vXpGt/zaXhHH0fn4IxgqZc82aKg6bpQ= github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= @@ -1287,6 +1366,10 @@ github.com/whyrusleeping/cbor-gen v0.3.2-0.20250409092040-76796969edea h1:/uOIA8 github.com/whyrusleeping/cbor-gen v0.3.2-0.20250409092040-76796969edea/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= +github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= +github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU= github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= @@ -1328,6 +1411,8 @@ github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= github.com/zeebo/pcg 
v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= +github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= +github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= gitlab.com/yawning/secp256k1-voi v0.0.0-20230925100816-f2616030848b h1:CzigHMRySiX3drau9C6Q5CAbNIApmLdat5jPMqChvDA= gitlab.com/yawning/secp256k1-voi v0.0.0-20230925100816-f2616030848b/go.mod h1:/y/V339mxv2sZmYYR64O07VuCpdNZqCTwO8ZcouTMI8= gitlab.com/yawning/tuplehash v0.0.0-20230713102510-df83abbf9a02 h1:qwDnMxjkyLmAFgcfgTnfJrmYKWhHnci3GjDqcZp1M3Q= @@ -1337,8 +1422,8 @@ go.etcd.io/bbolt v1.4.3/go.mod h1:tKQlpPaYCVFctUIgFKFnAlvbmB3tpy1vkTnDWohtc0E= go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= -go.mongodb.org/mongo-driver v1.17.4 h1:jUorfmVzljjr0FLzYQsGP8cgN/qzzxlY9Vh0C9KFXVw= -go.mongodb.org/mongo-driver v1.17.4/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= +go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss= +go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -1420,8 +1505,8 @@ golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58 golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= -golang.org/x/crypto 
v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= +golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= +golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1436,6 +1521,8 @@ golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2 golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.32.0 h1:6lZQWq75h7L5IWNk0r+SCpUJ6tUVd3v4ZHnbRKLkUDQ= +golang.org/x/image v0.32.0/go.mod h1:/R37rrQmKXtO6tYXAjtDLwQgFLHmhW+V6ayXlxzP2Pc= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -1457,8 +1544,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= -golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= 
+golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c= +golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1501,8 +1588,8 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4= -golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= +golang.org/x/net v0.51.0 h1:94R/GTO7mt3/4wIKpcR5gkGmRLOuE/2hNGeWq/GBIFo= +golang.org/x/net v0.51.0/go.mod h1:aamm+2QF5ogm02fjy5Bb7CQ0WMt1/WVM7FtyaTLlA9Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1511,8 +1598,8 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY= -golang.org/x/oauth2 
v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo= +golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1527,8 +1614,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= -golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1580,6 +1667,7 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211117180635-dee7805ff2e1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1592,10 +1680,10 @@ golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= -golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/telemetry v0.0.0-20251022145735-5be28d707443 h1:eE5IhBiTMPgrcTS6Mlh7IG4MdydRrXr2y60Jn/JC6kM= -golang.org/x/telemetry v0.0.0-20251022145735-5be28d707443/go.mod h1:Pi4ztBfryZoJEkyFTI5/Ocsu2jXyDr6iSdgJiYE/uwE= +golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= +golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2 h1:O1cMQHRfwNpDfDJerqRoE2oD+AFlyid87D40L/OkkJo= +golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2/go.mod h1:b7fPSJ0pKZ3ccUh8gnTONJxhn3c/PS6tyzQvyqw4iA8= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -1603,8 +1691,8 @@ golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term 
v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q= -golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss= +golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg= +golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1617,8 +1705,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= -golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= +golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk= +golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1680,8 +1768,8 @@ golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= -golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= +golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc= +golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1709,8 +1797,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.253.0 h1:apU86Eq9Q2eQco3NsUYFpVTfy7DwemojL7LmbAj7g/I= -google.golang.org/api v0.253.0/go.mod h1:PX09ad0r/4du83vZVAaGg7OaeyGnaUmT/CYPNvtLCbw= +google.golang.org/api v0.255.0 h1:OaF+IbRwOottVCYV2wZan7KUq7UeNUQn1BcPc4K7lE4= +google.golang.org/api v0.255.0/go.mod h1:d1/EtvCLdtiWEV4rAEHDHGh2bCnqsWhw+M8y2ECN4a8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1756,8 +1844,8 @@ google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuO google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod 
h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s= google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY= google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 h1:M1rk8KBnUsBDg1oPGHNCxG4vc1f49epmTO7xscSajMk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 h1:tRPGkdGHuewF4UisLzzHHr1spKw92qLM98nIzxbC0wY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -1861,8 +1949,8 @@ rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= -storj.io/common v0.0.0-20251022143549-19bf6a9f274a h1:g3stKez/XuRJ25RYRSUthdhG86DgFpBAvnOqHufHdDM= -storj.io/common v0.0.0-20251022143549-19bf6a9f274a/go.mod h1:UgjRCGYYYQc2U5W9MiNZJYBa3pCM8t/TLVzlTfAjKBY= +storj.io/common v0.0.0-20251107171817-6221ae45072c h1:UDXSrdeLJe3QFouavSW10fYdpclK0YNu3KvQHzqq2+k= +storj.io/common v0.0.0-20251107171817-6221ae45072c/go.mod h1:XNX7uykja6aco92y2y8RuqaXIDRPpt1YA2OQDKlKEUk= storj.io/drpc v0.0.35-0.20250513201419-f7819ea69b55 h1:8OE12DvUnB9lfZcHe7IDGsuhjrY9GBAr964PVHmhsro= storj.io/drpc 
v0.0.35-0.20250513201419-f7819ea69b55/go.mod h1:Y9LZaa8esL1PW2IDMqJE7CFSNq7d5bQ3RI7mGPtmKMg= storj.io/eventkit v0.0.0-20250410172343-61f26d3de156 h1:5MZ0CyMbG6Pi0rRzUWVG6dvpXjbBYEX2oyXuj+tT+sk= diff --git a/handler/storage/types_gen.go b/handler/storage/types_gen.go index 95e6f6429..51e9cac00 100644 --- a/handler/storage/types_gen.go +++ b/handler/storage/types_gen.go @@ -10,6 +10,7 @@ type azureblobConfig struct { EnvAuth bool `json:"envAuth" default:"false"` // Read credentials from runtime (environment variables, CLI or MSI). Key string `json:"key"` // Storage Account Shared Key. SasUrl string `json:"sasUrl"` // SAS URL for container level access only. + ConnectionString string `json:"connectionString"` // Storage Connection String. Tenant string `json:"tenant"` // ID of the service principal's tenant. Also called its directory ID. ClientId string `json:"clientId"` // The ID of the client in use. ClientSecret string `json:"clientSecret"` // One of the service principal's client secrets @@ -19,15 +20,20 @@ type azureblobConfig struct { Username string `json:"username"` // User name (usually an email address) Password string `json:"password"` // The user's password ServicePrincipalFile string `json:"servicePrincipalFile"` // Path to file containing credentials for use with a service principal. + DisableInstanceDiscovery bool `json:"disableInstanceDiscovery" default:"false"` // Skip requesting Microsoft Entra instance metadata UseMsi bool `json:"useMsi" default:"false"` // Use a managed service identity to authenticate (only works in Azure). MsiObjectId string `json:"msiObjectId"` // Object ID of the user-assigned MSI to use, if any. MsiClientId string `json:"msiClientId"` // Object ID of the user-assigned MSI to use, if any. MsiMiResId string `json:"msiMiResId"` // Azure resource ID of the user-assigned MSI to use, if any. UseEmulator bool `json:"useEmulator" default:"false"` // Uses local storage emulator if provided as 'true'. 
+ UseAz bool `json:"useAz" default:"false"` // Use Azure CLI tool az for authentication Endpoint string `json:"endpoint"` // Endpoint for the service. UploadCutoff string `json:"uploadCutoff"` // Cutoff for switching to chunked upload (<= 256 MiB) (deprecated). ChunkSize string `json:"chunkSize" default:"4Mi"` // Upload chunk size. UploadConcurrency int `json:"uploadConcurrency" default:"16"` // Concurrency for multipart uploads. + CopyCutoff string `json:"copyCutoff" default:"8Mi"` // Cutoff for switching to multipart copy. + CopyConcurrency int `json:"copyConcurrency" default:"512"` // Concurrency for multipart copy. + UseCopyBlob bool `json:"useCopyBlob" default:"true"` // Whether to use the Copy Blob API when copying to the same storage account. ListChunk int `json:"listChunk" default:"5000"` // Size of blob list. AccessTier string `json:"accessTier"` // Access tier of blob: hot, cool, cold or archive. ArchiveTierDelete bool `json:"archiveTierDelete" default:"false"` // Delete archive tier blobs before overwriting. @@ -81,6 +87,10 @@ type b2Config struct { MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) Lifecycle int `json:"lifecycle" default:"0"` // Set the number of days deleted files should be kept when creating a bucket. Encoding string `json:"encoding" default:"Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot"` // The encoding for the backend. + SseCustomerAlgorithm string `json:"sseCustomerAlgorithm" example:""` // If using SSE-C, the server-side encryption algorithm used when storing this object in B2. 
+ SseCustomerKey string `json:"sseCustomerKey" example:""` // To use SSE-C, you may provide the secret encryption key encoded in a UTF-8 compatible string to encrypt/decrypt your data + SseCustomerKeyBase64 string `json:"sseCustomerKeyBase64" example:""` // To use SSE-C, you may provide the secret encryption key encoded in Base64 format to encrypt/decrypt your data + SseCustomerKeyMd5 string `json:"sseCustomerKeyMd5" example:""` // If using SSE-C you may provide the secret encryption key MD5 checksum (optional). Description string `json:"description"` // Description of the remote. } @@ -104,22 +114,24 @@ type createB2StorageRequest struct { func createB2Storage() {} type boxConfig struct { - ClientId string `json:"clientId"` // OAuth Client Id. - ClientSecret string `json:"clientSecret"` // OAuth Client Secret. - Token string `json:"token"` // OAuth Access Token as a JSON blob. - AuthUrl string `json:"authUrl"` // Auth server URL. - TokenUrl string `json:"tokenUrl"` // Token server url. - RootFolderId string `json:"rootFolderId" default:"0"` // Fill in for rclone to use a non root folder as its starting point. - BoxConfigFile string `json:"boxConfigFile"` // Box App config.json location - AccessToken string `json:"accessToken"` // Box App Primary Access Token - BoxSubType string `json:"boxSubType" default:"user" example:"user"` // - UploadCutoff string `json:"uploadCutoff" default:"50Mi"` // Cutoff for switching to multipart upload (>= 50 MiB). - CommitRetries int `json:"commitRetries" default:"100"` // Max number of times to try committing a multipart file. - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk 1-1000. - OwnedBy string `json:"ownedBy"` // Only show items owned by the login (email address) passed in. - Impersonate string `json:"impersonate"` // Impersonate this user ID when using a service account. - Encoding string `json:"encoding" default:"Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot"` // The encoding for the backend. 
- Description string `json:"description"` // Description of the remote. + ClientId string `json:"clientId"` // OAuth Client Id. + ClientSecret string `json:"clientSecret"` // OAuth Client Secret. + Token string `json:"token"` // OAuth Access Token as a JSON blob. + AuthUrl string `json:"authUrl"` // Auth server URL. + TokenUrl string `json:"tokenUrl"` // Token server url. + ClientCredentials bool `json:"clientCredentials" default:"false"` // Use client credentials OAuth flow. + RootFolderId string `json:"rootFolderId" default:"0"` // Fill in for rclone to use a non root folder as its starting point. + BoxConfigFile string `json:"boxConfigFile"` // Box App config.json location + ConfigCredentials string `json:"configCredentials"` // Box App config.json contents. + AccessToken string `json:"accessToken"` // Box App Primary Access Token + BoxSubType string `json:"boxSubType" default:"user" example:"user"` // + UploadCutoff string `json:"uploadCutoff" default:"50Mi"` // Cutoff for switching to multipart upload (>= 50 MiB). + CommitRetries int `json:"commitRetries" default:"100"` // Max number of times to try committing a multipart file. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk 1-1000. + OwnedBy string `json:"ownedBy"` // Only show items owned by the login (email address) passed in. + Impersonate string `json:"impersonate"` // Impersonate this user ID when using a service account. + Encoding string `json:"encoding" default:"Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot"` // The encoding for the backend. + Description string `json:"description"` // Description of the remote. } type createBoxStorageRequest struct { @@ -142,56 +154,58 @@ type createBoxStorageRequest struct { func createBoxStorage() {} type driveConfig struct { - ClientId string `json:"clientId"` // Google Application Client Id - ClientSecret string `json:"clientSecret"` // OAuth Client Secret. - Token string `json:"token"` // OAuth Access Token as a JSON blob. 
- AuthUrl string `json:"authUrl"` // Auth server URL. - TokenUrl string `json:"tokenUrl"` // Token server url. - Scope string `json:"scope" example:"drive"` // Comma separated list of scopes that rclone should use when requesting access from drive. - RootFolderId string `json:"rootFolderId"` // ID of the root folder. - ServiceAccountFile string `json:"serviceAccountFile"` // Service Account Credentials JSON file path. - ServiceAccountCredentials string `json:"serviceAccountCredentials"` // Service Account Credentials JSON blob. - TeamDrive string `json:"teamDrive"` // ID of the Shared Drive (Team Drive). - AuthOwnerOnly bool `json:"authOwnerOnly" default:"false"` // Only consider files owned by the authenticated user. - UseTrash bool `json:"useTrash" default:"true"` // Send files to the trash instead of deleting permanently. - CopyShortcutContent bool `json:"copyShortcutContent" default:"false"` // Server side copy contents of shortcuts instead of the shortcut. - SkipGdocs bool `json:"skipGdocs" default:"false"` // Skip google documents in all listings. - ShowAllGdocs bool `json:"showAllGdocs" default:"false"` // Show all Google Docs including non-exportable ones in listings. - SkipChecksumGphotos bool `json:"skipChecksumGphotos" default:"false"` // Skip checksums on Google photos and videos only. - SharedWithMe bool `json:"sharedWithMe" default:"false"` // Only show files that are shared with me. - TrashedOnly bool `json:"trashedOnly" default:"false"` // Only show files that are in the trash. - StarredOnly bool `json:"starredOnly" default:"false"` // Only show files that are starred. - Formats string `json:"formats"` // Deprecated: See export_formats. - ExportFormats string `json:"exportFormats" default:"docx,xlsx,pptx,svg"` // Comma separated list of preferred formats for downloading Google docs. - ImportFormats string `json:"importFormats"` // Comma separated list of preferred formats for uploading Google docs. 
- AllowImportNameChange bool `json:"allowImportNameChange" default:"false"` // Allow the filetype to change when uploading Google docs. - UseCreatedDate bool `json:"useCreatedDate" default:"false"` // Use file created date instead of modified date. - UseSharedDate bool `json:"useSharedDate" default:"false"` // Use date file was shared instead of modified date. - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk 100-1000, 0 to disable. - Impersonate string `json:"impersonate"` // Impersonate this user when using a service account. - AlternateExport bool `json:"alternateExport" default:"false"` // Deprecated: No longer needed. - UploadCutoff string `json:"uploadCutoff" default:"8Mi"` // Cutoff for switching to chunked upload. - ChunkSize string `json:"chunkSize" default:"8Mi"` // Upload chunk size. - AcknowledgeAbuse bool `json:"acknowledgeAbuse" default:"false"` // Set to allow files which return cannotDownloadAbusiveFile to be downloaded. - KeepRevisionForever bool `json:"keepRevisionForever" default:"false"` // Keep new head revision of each file forever. - SizeAsQuota bool `json:"sizeAsQuota" default:"false"` // Show sizes as storage quota usage, not actual size. - V2DownloadMinSize string `json:"v2DownloadMinSize" default:"off"` // If Object's are greater, use drive v2 API to download. - PacerMinSleep string `json:"pacerMinSleep" default:"100ms"` // Minimum time to sleep between API calls. - PacerBurst int `json:"pacerBurst" default:"100"` // Number of API calls to allow without sleeping. - ServerSideAcrossConfigs bool `json:"serverSideAcrossConfigs" default:"false"` // Deprecated: use --server-side-across-configs instead. - DisableHttp2 bool `json:"disableHttp2" default:"true"` // Disable drive using http2. - StopOnUploadLimit bool `json:"stopOnUploadLimit" default:"false"` // Make upload limit errors be fatal. - StopOnDownloadLimit bool `json:"stopOnDownloadLimit" default:"false"` // Make download limit errors be fatal. 
- SkipShortcuts bool `json:"skipShortcuts" default:"false"` // If set skip shortcut files. - SkipDanglingShortcuts bool `json:"skipDanglingShortcuts" default:"false"` // If set skip dangling shortcut files. - ResourceKey string `json:"resourceKey"` // Resource key for accessing a link-shared file. - FastListBugFix bool `json:"fastListBugFix" default:"true"` // Work around a bug in Google Drive listing. - MetadataOwner string `json:"metadataOwner" default:"read" example:"off"` // Control whether owner should be read or written in metadata. - MetadataPermissions string `json:"metadataPermissions" default:"off" example:"off"` // Control whether permissions should be read or written in metadata. - MetadataLabels string `json:"metadataLabels" default:"off" example:"off"` // Control whether labels should be read or written in metadata. - Encoding string `json:"encoding" default:"InvalidUtf8"` // The encoding for the backend. - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get IAM credentials from runtime (environment variables or instance meta data if no env vars). - Description string `json:"description"` // Description of the remote. + ClientId string `json:"clientId"` // Google Application Client Id + ClientSecret string `json:"clientSecret"` // OAuth Client Secret. + Token string `json:"token"` // OAuth Access Token as a JSON blob. + AuthUrl string `json:"authUrl"` // Auth server URL. + TokenUrl string `json:"tokenUrl"` // Token server url. + ClientCredentials bool `json:"clientCredentials" default:"false"` // Use client credentials OAuth flow. + Scope string `json:"scope" example:"drive"` // Comma separated list of scopes that rclone should use when requesting access from drive. + RootFolderId string `json:"rootFolderId"` // ID of the root folder. + ServiceAccountFile string `json:"serviceAccountFile"` // Service Account Credentials JSON file path. 
+ ServiceAccountCredentials string `json:"serviceAccountCredentials"` // Service Account Credentials JSON blob. + TeamDrive string `json:"teamDrive"` // ID of the Shared Drive (Team Drive). + AuthOwnerOnly bool `json:"authOwnerOnly" default:"false"` // Only consider files owned by the authenticated user. + UseTrash bool `json:"useTrash" default:"true"` // Send files to the trash instead of deleting permanently. + CopyShortcutContent bool `json:"copyShortcutContent" default:"false"` // Server side copy contents of shortcuts instead of the shortcut. + SkipGdocs bool `json:"skipGdocs" default:"false"` // Skip google documents in all listings. + ShowAllGdocs bool `json:"showAllGdocs" default:"false"` // Show all Google Docs including non-exportable ones in listings. + SkipChecksumGphotos bool `json:"skipChecksumGphotos" default:"false"` // Skip checksums on Google photos and videos only. + SharedWithMe bool `json:"sharedWithMe" default:"false"` // Only show files that are shared with me. + TrashedOnly bool `json:"trashedOnly" default:"false"` // Only show files that are in the trash. + StarredOnly bool `json:"starredOnly" default:"false"` // Only show files that are starred. + Formats string `json:"formats"` // Deprecated: See export_formats. + ExportFormats string `json:"exportFormats" default:"docx,xlsx,pptx,svg"` // Comma separated list of preferred formats for downloading Google docs. + ImportFormats string `json:"importFormats"` // Comma separated list of preferred formats for uploading Google docs. + AllowImportNameChange bool `json:"allowImportNameChange" default:"false"` // Allow the filetype to change when uploading Google docs. + UseCreatedDate bool `json:"useCreatedDate" default:"false"` // Use file created date instead of modified date. + UseSharedDate bool `json:"useSharedDate" default:"false"` // Use date file was shared instead of modified date. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk 100-1000, 0 to disable. 
+ Impersonate string `json:"impersonate"` // Impersonate this user when using a service account. + AlternateExport bool `json:"alternateExport" default:"false"` // Deprecated: No longer needed. + UploadCutoff string `json:"uploadCutoff" default:"8Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"8Mi"` // Upload chunk size. + AcknowledgeAbuse bool `json:"acknowledgeAbuse" default:"false"` // Set to allow files which return cannotDownloadAbusiveFile to be downloaded. + KeepRevisionForever bool `json:"keepRevisionForever" default:"false"` // Keep new head revision of each file forever. + SizeAsQuota bool `json:"sizeAsQuota" default:"false"` // Show sizes as storage quota usage, not actual size. + V2DownloadMinSize string `json:"v2DownloadMinSize" default:"off"` // If Object's are greater, use drive v2 API to download. + PacerMinSleep string `json:"pacerMinSleep" default:"100ms"` // Minimum time to sleep between API calls. + PacerBurst int `json:"pacerBurst" default:"100"` // Number of API calls to allow without sleeping. + ServerSideAcrossConfigs bool `json:"serverSideAcrossConfigs" default:"false"` // Deprecated: use --server-side-across-configs instead. + DisableHttp2 bool `json:"disableHttp2" default:"true"` // Disable drive using http2. + StopOnUploadLimit bool `json:"stopOnUploadLimit" default:"false"` // Make upload limit errors be fatal. + StopOnDownloadLimit bool `json:"stopOnDownloadLimit" default:"false"` // Make download limit errors be fatal. + SkipShortcuts bool `json:"skipShortcuts" default:"false"` // If set skip shortcut files. + SkipDanglingShortcuts bool `json:"skipDanglingShortcuts" default:"false"` // If set skip dangling shortcut files. + ResourceKey string `json:"resourceKey"` // Resource key for accessing a link-shared file. + FastListBugFix bool `json:"fastListBugFix" default:"true"` // Work around a bug in Google Drive listing. 
+ MetadataOwner string `json:"metadataOwner" default:"read" example:"off"` // Control whether owner should be read or written in metadata. + MetadataPermissions string `json:"metadataPermissions" default:"off" example:"off"` // Control whether permissions should be read or written in metadata. + MetadataLabels string `json:"metadataLabels" default:"off" example:"off"` // Control whether labels should be read or written in metadata. + MetadataEnforceExpansiveAccess bool `json:"metadataEnforceExpansiveAccess" default:"false"` // Whether the request should enforce expansive access rules. + Encoding string `json:"encoding" default:"InvalidUtf8"` // The encoding for the backend. + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get IAM credentials from runtime (environment variables or instance meta data if no env vars). + Description string `json:"description"` // Description of the remote. } type createDriveStorageRequest struct { @@ -219,6 +233,7 @@ type dropboxConfig struct { Token string `json:"token"` // OAuth Access Token as a JSON blob. AuthUrl string `json:"authUrl"` // Auth server URL. TokenUrl string `json:"tokenUrl"` // Token server url. + ClientCredentials bool `json:"clientCredentials" default:"false"` // Use client credentials OAuth flow. ChunkSize string `json:"chunkSize" default:"48Mi"` // Upload chunk size (< 150Mi). Impersonate string `json:"impersonate"` // Impersonate this user when using a business account. SharedFiles bool `json:"sharedFiles" default:"false"` // Instructs rclone to work on individual shared files. @@ -226,10 +241,13 @@ type dropboxConfig struct { PacerMinSleep string `json:"pacerMinSleep" default:"10ms"` // Minimum time to sleep between API calls. Encoding string `json:"encoding" default:"Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot"` // The encoding for the backend. RootNamespace string `json:"rootNamespace"` // Specify a different Dropbox namespace ID to use as the root for all paths. 
+ ExportFormats string `json:"exportFormats" default:"html,md"` // Comma separated list of preferred formats for exporting files + SkipExports bool `json:"skipExports" default:"false"` // Skip exportable files in all listings. + ShowAllExports bool `json:"showAllExports" default:"false"` // Show all exportable files in listings. BatchMode string `json:"batchMode" default:"sync"` // Upload file batching sync|async|off. BatchSize int `json:"batchSize" default:"0"` // Max number of files in upload batch. BatchTimeout string `json:"batchTimeout" default:"0s"` // Max time to allow an idle upload batch before uploading. - BatchCommitTimeout string `json:"batchCommitTimeout" default:"10m0s"` // Max time to wait for a batch to finish committing + BatchCommitTimeout string `json:"batchCommitTimeout" default:"10m0s"` // Max time to wait for a batch to finish committing. (no longer used) Description string `json:"description"` // Description of the remote. } @@ -312,28 +330,31 @@ type createFilefabricStorageRequest struct { func createFilefabricStorage() {} type ftpConfig struct { - Host string `json:"host"` // FTP host to connect to. - User string `json:"user" default:"$USER"` // FTP username. - Port int `json:"port" default:"21"` // FTP port number. - Pass string `json:"pass"` // FTP password. - Tls bool `json:"tls" default:"false"` // Use Implicit FTPS (FTP over TLS). - ExplicitTls bool `json:"explicitTls" default:"false"` // Use Explicit FTPS (FTP over TLS). - Concurrency int `json:"concurrency" default:"0"` // Maximum number of FTP simultaneous connections, 0 for unlimited. - NoCheckCertificate bool `json:"noCheckCertificate" default:"false"` // Do not verify the TLS certificate of the server. - DisableEpsv bool `json:"disableEpsv" default:"false"` // Disable using EPSV even if server advertises support. - DisableMlsd bool `json:"disableMlsd" default:"false"` // Disable using MLSD even if server advertises support. 
- DisableUtf8 bool `json:"disableUtf8" default:"false"` // Disable using UTF-8 even if server advertises support. - WritingMdtm bool `json:"writingMdtm" default:"false"` // Use MDTM to set modification time (VsFtpd quirk) - ForceListHidden bool `json:"forceListHidden" default:"false"` // Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD. - IdleTimeout string `json:"idleTimeout" default:"1m0s"` // Max time before closing idle connections. - CloseTimeout string `json:"closeTimeout" default:"1m0s"` // Maximum time to wait for a response to close. - TlsCacheSize int `json:"tlsCacheSize" default:"32"` // Size of TLS session cache for all control and data connections. - DisableTls13 bool `json:"disableTls13" default:"false"` // Disable TLS 1.3 (workaround for FTP servers with buggy TLS) - ShutTimeout string `json:"shutTimeout" default:"1m0s"` // Maximum time to wait for data connection closing status. - AskPassword bool `json:"askPassword" default:"false"` // Allow asking for FTP password when needed. - SocksProxy string `json:"socksProxy"` // Socks 5 proxy host. - Encoding string `json:"encoding" default:"Slash,Del,Ctl,RightSpace,Dot" example:"Asterisk,Ctl,Dot,Slash"` // The encoding for the backend. - Description string `json:"description"` // Description of the remote. + Host string `json:"host"` // FTP host to connect to. + User string `json:"user" default:"$USER"` // FTP username. + Port int `json:"port" default:"21"` // FTP port number. + Pass string `json:"pass"` // FTP password. + Tls bool `json:"tls" default:"false"` // Use Implicit FTPS (FTP over TLS). + ExplicitTls bool `json:"explicitTls" default:"false"` // Use Explicit FTPS (FTP over TLS). + Concurrency int `json:"concurrency" default:"0"` // Maximum number of FTP simultaneous connections, 0 for unlimited. + NoCheckCertificate bool `json:"noCheckCertificate" default:"false"` // Do not verify the TLS certificate of the server. 
+ DisableEpsv bool `json:"disableEpsv" default:"false"` // Disable using EPSV even if server advertises support. + DisableMlsd bool `json:"disableMlsd" default:"false"` // Disable using MLSD even if server advertises support. + DisableUtf8 bool `json:"disableUtf8" default:"false"` // Disable using UTF-8 even if server advertises support. + WritingMdtm bool `json:"writingMdtm" default:"false"` // Use MDTM to set modification time (VsFtpd quirk) + ForceListHidden bool `json:"forceListHidden" default:"false"` // Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD. + IdleTimeout string `json:"idleTimeout" default:"1m0s"` // Max time before closing idle connections. + CloseTimeout string `json:"closeTimeout" default:"1m0s"` // Maximum time to wait for a response to close. + TlsCacheSize int `json:"tlsCacheSize" default:"32"` // Size of TLS session cache for all control and data connections. + DisableTls13 bool `json:"disableTls13" default:"false"` // Disable TLS 1.3 (workaround for FTP servers with buggy TLS) + AllowInsecureTlsCiphers bool `json:"allowInsecureTlsCiphers" default:"false"` // Allow insecure TLS ciphers + ShutTimeout string `json:"shutTimeout" default:"1m0s"` // Maximum time to wait for data connection closing status. + AskPassword bool `json:"askPassword" default:"false"` // Allow asking for FTP password when needed. + SocksProxy string `json:"socksProxy"` // Socks 5 proxy host. + HttpProxy string `json:"httpProxy"` // URL for HTTP CONNECT proxy + NoCheckUpload bool `json:"noCheckUpload" default:"false"` // Don't check the upload is OK + Encoding string `json:"encoding" default:"Slash,Del,Ctl,RightSpace,Dot" example:"Asterisk,Ctl,Dot,Slash"` // The encoding for the backend. + Description string `json:"description"` // Description of the remote. } type createFtpStorageRequest struct { @@ -361,10 +382,12 @@ type gcsConfig struct { Token string `json:"token"` // OAuth Access Token as a JSON blob. 
AuthUrl string `json:"authUrl"` // Auth server URL. TokenUrl string `json:"tokenUrl"` // Token server url. + ClientCredentials bool `json:"clientCredentials" default:"false"` // Use client credentials OAuth flow. ProjectNumber string `json:"projectNumber"` // Project number. UserProject string `json:"userProject"` // User project. ServiceAccountFile string `json:"serviceAccountFile"` // Service Account Credentials JSON file path. ServiceAccountCredentials string `json:"serviceAccountCredentials"` // Service Account Credentials JSON blob. + AccessToken string `json:"accessToken"` // Short-lived access token. Anonymous bool `json:"anonymous" default:"false"` // Access public buckets and objects without credentials. ObjectAcl string `json:"objectAcl" example:"authenticatedRead"` // Access Control List for new objects. BucketAcl string `json:"bucketAcl" example:"authenticatedRead"` // Access Control List for new buckets. @@ -374,7 +397,7 @@ type gcsConfig struct { DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - Endpoint string `json:"endpoint"` // Endpoint for the service. + Endpoint string `json:"endpoint" example:"storage.example.org"` // Custom endpoint for the storage API. Leave blank to use the provider default. Encoding string `json:"encoding" default:"Slash,CrLf,InvalidUtf8,Dot"` // The encoding for the backend. EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars). Description string `json:"description"` // Description of the remote. 
@@ -405,15 +428,17 @@ type gphotosConfig struct { Token string `json:"token"` // OAuth Access Token as a JSON blob. AuthUrl string `json:"authUrl"` // Auth server URL. TokenUrl string `json:"tokenUrl"` // Token server url. + ClientCredentials bool `json:"clientCredentials" default:"false"` // Use client credentials OAuth flow. ReadOnly bool `json:"readOnly" default:"false"` // Set to make the Google Photos backend read only. ReadSize bool `json:"readSize" default:"false"` // Set to read the size of media items. StartYear int `json:"startYear" default:"2000"` // Year limits the photos to be downloaded to those which are uploaded after the given year. IncludeArchived bool `json:"includeArchived" default:"false"` // Also view and download archived media. + Proxy string `json:"proxy"` // Use the gphotosdl proxy for downloading the full resolution images Encoding string `json:"encoding" default:"Slash,CrLf,InvalidUtf8,Dot"` // The encoding for the backend. BatchMode string `json:"batchMode" default:"sync"` // Upload file batching sync|async|off. BatchSize int `json:"batchSize" default:"0"` // Max number of files in upload batch. BatchTimeout string `json:"batchTimeout" default:"0s"` // Max time to allow an idle upload batch before uploading. - BatchCommitTimeout string `json:"batchCommitTimeout" default:"10m0s"` // Max time to wait for a batch to finish committing + BatchCommitTimeout string `json:"batchCommitTimeout" default:"10m0s"` // Max time to wait for a batch to finish committing. (no longer used) Description string `json:"description"` // Description of the remote. } @@ -470,6 +495,7 @@ type hidriveConfig struct { Token string `json:"token"` // OAuth Access Token as a JSON blob. AuthUrl string `json:"authUrl"` // Auth server URL. TokenUrl string `json:"tokenUrl"` // Token server url. + ClientCredentials bool `json:"clientCredentials" default:"false"` // Use client credentials OAuth flow. 
ScopeAccess string `json:"scopeAccess" default:"rw" example:"rw"` // Access permissions that rclone should use when requesting access from HiDrive. ScopeRole string `json:"scopeRole" default:"user" example:"user"` // User-level that rclone should use when requesting access from HiDrive. RootPrefix string `json:"rootPrefix" default:"/" example:"/"` // The root/parent folder for all paths. @@ -530,14 +556,16 @@ type createHttpStorageRequest struct { func createHttpStorage() {} type internetarchiveConfig struct { - AccessKeyId string `json:"accessKeyId"` // IAS3 Access Key. - SecretAccessKey string `json:"secretAccessKey"` // IAS3 Secret Key (password). - Endpoint string `json:"endpoint" default:"https://s3.us.archive.org"` // IAS3 Endpoint. - FrontEndpoint string `json:"frontEndpoint" default:"https://archive.org"` // Host of InternetArchive Frontend. - DisableChecksum bool `json:"disableChecksum" default:"true"` // Don't ask the server to test against MD5 checksum calculated by rclone. - WaitArchive string `json:"waitArchive" default:"0s"` // Timeout for waiting the server's processing tasks (specifically archive and book_op) to finish. - Encoding string `json:"encoding" default:"Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot"` // The encoding for the backend. - Description string `json:"description"` // Description of the remote. + AccessKeyId string `json:"accessKeyId"` // IAS3 Access Key. + SecretAccessKey string `json:"secretAccessKey"` // IAS3 Secret Key (password). + Endpoint string `json:"endpoint" default:"https://s3.us.archive.org"` // IAS3 Endpoint. + FrontEndpoint string `json:"frontEndpoint" default:"https://archive.org"` // Host of InternetArchive Frontend. + ItemMetadata []string `json:"itemMetadata" default:"[]"` // Metadata to be set on the IA item, this is different from file-level metadata that can be set using --metadata-set. + ItemDerive bool `json:"itemDerive" default:"true"` // Whether to trigger derive on the IA item or not. 
If set to false, the item will not be derived by IA upon upload. + DisableChecksum bool `json:"disableChecksum" default:"true"` // Don't ask the server to test against MD5 checksum calculated by rclone. + WaitArchive string `json:"waitArchive" default:"0s"` // Timeout for waiting the server's processing tasks (specifically archive and book_op) to finish. + Encoding string `json:"encoding" default:"Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot"` // The encoding for the backend. + Description string `json:"description"` // Description of the remote. } type createInternetarchiveStorageRequest struct { @@ -565,6 +593,7 @@ type jottacloudConfig struct { Token string `json:"token"` // OAuth Access Token as a JSON blob. AuthUrl string `json:"authUrl"` // Auth server URL. TokenUrl string `json:"tokenUrl"` // Token server url. + ClientCredentials bool `json:"clientCredentials" default:"false"` // Use client credentials OAuth flow. Md5MemoryLimit string `json:"md5MemoryLimit" default:"10Mi"` // Files bigger than this will be cached on disk to calculate the MD5 if required. TrashedOnly bool `json:"trashedOnly" default:"false"` // Only show files that are in the trash. HardDelete bool `json:"hardDelete" default:"false"` // Delete files permanently rather than putting them into the trash. @@ -681,8 +710,9 @@ func createKoofrOtherStorage() {} type localConfig struct { Nounc bool `json:"nounc" default:"false" example:"true"` // Disable UNC (long path names) conversion on Windows. CopyLinks bool `json:"copyLinks" default:"false"` // Follow symlinks and copy the pointed to item. - Links bool `json:"links" default:"false"` // Translate symlinks to/from regular files with a '.rclonelink' extension. + Links bool `json:"links" default:"false"` // Translate symlinks to/from regular files with a '.rclonelink' extension for the local backend. SkipLinks bool `json:"skipLinks" default:"false"` // Don't warn about skipped symlinks. 
+ SkipSpecials bool `json:"skipSpecials" default:"false"` // Don't warn about skipped pipes, sockets and device objects. ZeroSizeLinks bool `json:"zeroSizeLinks" default:"false"` // Assume the Stat size of links is zero (and read them instead) (deprecated). UnicodeNormalization bool `json:"unicodeNormalization" default:"false"` // Apply unicode NFC normalization to paths and filenames. NoCheckUpdated bool `json:"noCheckUpdated" default:"false"` // Don't check to see if the files change during upload. @@ -694,6 +724,7 @@ type localConfig struct { NoSparse bool `json:"noSparse" default:"false"` // Disable sparse files for multi-thread downloads. NoSetModtime bool `json:"noSetModtime" default:"false"` // Disable setting modtime. TimeType string `json:"timeType" default:"mtime" example:"mtime"` // Set what kind of time is returned. + Hashes string `json:"hashes"` // Comma separated list of supported checksum types. Encoding string `json:"encoding" default:"Slash,Dot"` // The encoding for the backend. Description string `json:"description"` // Description of the remote. } @@ -723,6 +754,7 @@ type mailruConfig struct { Token string `json:"token"` // OAuth Access Token as a JSON blob. AuthUrl string `json:"authUrl"` // Auth server URL. TokenUrl string `json:"tokenUrl"` // Token server url. + ClientCredentials bool `json:"clientCredentials" default:"false"` // Use client credentials OAuth flow. User string `json:"user"` // User name (usually email). Pass string `json:"pass"` // Password. SpeedupEnable bool `json:"speedupEnable" default:"true" example:"true"` // Skip full upload if there is another file with same data hash. @@ -758,6 +790,9 @@ func createMailruStorage() {} type megaConfig struct { User string `json:"user"` // User name. Pass string `json:"pass"` // Password. 
+ TwoFA string `json:"2fa"` // The 2FA code of your MEGA account if the account is set up with one + SessionId string `json:"sessionId"` // Session (internal use only) + MasterKey string `json:"masterKey"` // Master key (internal use only) Debug bool `json:"debug" default:"false"` // Output more debug from Mega. HardDelete bool `json:"hardDelete" default:"false"` // Delete files permanently rather than putting them into the trash. UseHttps bool `json:"useHttps" default:"false"` // Use HTTPS for transfers. @@ -817,12 +852,15 @@ type onedriveConfig struct { Token string `json:"token"` // OAuth Access Token as a JSON blob. AuthUrl string `json:"authUrl"` // Auth server URL. TokenUrl string `json:"tokenUrl"` // Token server url. + ClientCredentials bool `json:"clientCredentials" default:"false"` // Use client credentials OAuth flow. Region string `json:"region" default:"global" example:"global"` // Choose national cloud region for OneDrive. + UploadCutoff string `json:"uploadCutoff" default:"off"` // Cutoff for switching to chunked upload. ChunkSize string `json:"chunkSize" default:"10Mi"` // Chunk size to upload files with - must be multiple of 320k (327,680 bytes). DriveId string `json:"driveId"` // The ID of the drive to use. DriveType string `json:"driveType"` // The type of the drive (personal | business | documentLibrary). RootFolderId string `json:"rootFolderId"` // ID of the root folder. AccessScopes string `json:"accessScopes" default:"Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access" example:"Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access"` // Set scopes to be requested by rclone. + Tenant string `json:"tenant"` // ID of the service principal's tenant. Also called its directory ID. DisableSitePermission bool `json:"disableSitePermission" default:"false"` // Disable the request for Sites.Read.All permission. 
ExposeOnenoteFiles bool `json:"exposeOnenoteFiles" default:"false"` // Set to make OneNote files show up in directory listings. ServerSideAcrossConfigs bool `json:"serverSideAcrossConfigs" default:"false"` // Deprecated: use --server-side-across-configs instead. @@ -861,7 +899,7 @@ func createOnedriveStorage() {} type oosEnv_authConfig struct { Namespace string `json:"namespace"` // Object storage namespace - Compartment string `json:"compartment"` // Object storage compartment OCID + Compartment string `json:"compartment"` // Specify compartment OCID, if you need to list buckets. Region string `json:"region"` // Object storage Region Endpoint string `json:"endpoint"` // Endpoint for Object storage API. StorageTier string `json:"storageTier" default:"Standard" example:"Standard"` // The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm @@ -905,7 +943,7 @@ func createOosEnv_authStorage() {} type oosInstance_principal_authConfig struct { Namespace string `json:"namespace"` // Object storage namespace - Compartment string `json:"compartment"` // Object storage compartment OCID + Compartment string `json:"compartment"` // Specify compartment OCID, if you need to list buckets. Region string `json:"region"` // Object storage Region Endpoint string `json:"endpoint"` // Endpoint for Object storage API. StorageTier string `json:"storageTier" default:"Standard" example:"Standard"` // The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm @@ -992,7 +1030,7 @@ func createOosNo_authStorage() {} type oosResource_principal_authConfig struct { Namespace string `json:"namespace"` // Object storage namespace - Compartment string `json:"compartment"` // Object storage compartment OCID + Compartment string `json:"compartment"` // Specify compartment OCID, if you need to list buckets. 
Region string `json:"region"` // Object storage Region Endpoint string `json:"endpoint"` // Endpoint for Object storage API. StorageTier string `json:"storageTier" default:"Standard" example:"Standard"` // The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm @@ -1036,7 +1074,7 @@ func createOosResource_principal_authStorage() {} type oosUser_principal_authConfig struct { Namespace string `json:"namespace"` // Object storage namespace - Compartment string `json:"compartment"` // Object storage compartment OCID + Compartment string `json:"compartment"` // Specify compartment OCID, if you need to list buckets. Region string `json:"region"` // Object storage Region Endpoint string `json:"endpoint"` // Endpoint for Object storage API. ConfigFile string `json:"configFile" default:"~/.oci/config" example:"~/.oci/config"` // Path to OCI config file @@ -1082,7 +1120,7 @@ func createOosUser_principal_authStorage() {} type oosWorkload_identity_authConfig struct { Namespace string `json:"namespace"` // Object storage namespace - Compartment string `json:"compartment"` // Object storage compartment OCID + Compartment string `json:"compartment"` // Specify compartment OCID, if you need to list buckets. Region string `json:"region"` // Object storage Region Endpoint string `json:"endpoint"` // Endpoint for Object storage API. StorageTier string `json:"storageTier" default:"Standard" example:"Standard"` // The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm @@ -1129,6 +1167,7 @@ type opendriveConfig struct { Password string `json:"password"` // Password. Encoding string `json:"encoding" default:"Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot"` // The encoding for the backend. 
ChunkSize string `json:"chunkSize" default:"10Mi"` // Files will be uploaded in chunks this size. + Access string `json:"access" default:"private" example:"private"` // Files and folders will be uploaded with this access permission (default private) Description string `json:"description"` // Description of the remote. } @@ -1152,17 +1191,18 @@ type createOpendriveStorageRequest struct { func createOpendriveStorage() {} type pcloudConfig struct { - ClientId string `json:"clientId"` // OAuth Client Id. - ClientSecret string `json:"clientSecret"` // OAuth Client Secret. - Token string `json:"token"` // OAuth Access Token as a JSON blob. - AuthUrl string `json:"authUrl"` // Auth server URL. - TokenUrl string `json:"tokenUrl"` // Token server url. - Encoding string `json:"encoding" default:"Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot"` // The encoding for the backend. - RootFolderId string `json:"rootFolderId" default:"d0"` // Fill in for rclone to use a non root folder as its starting point. - Hostname string `json:"hostname" default:"api.pcloud.com" example:"api.pcloud.com"` // Hostname to connect to. - Username string `json:"username"` // Your pcloud username. - Password string `json:"password"` // Your pcloud password. - Description string `json:"description"` // Description of the remote. + ClientId string `json:"clientId"` // OAuth Client Id. + ClientSecret string `json:"clientSecret"` // OAuth Client Secret. + Token string `json:"token"` // OAuth Access Token as a JSON blob. + AuthUrl string `json:"authUrl"` // Auth server URL. + TokenUrl string `json:"tokenUrl"` // Token server url. + ClientCredentials bool `json:"clientCredentials" default:"false"` // Use client credentials OAuth flow. + Encoding string `json:"encoding" default:"Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot"` // The encoding for the backend. + RootFolderId string `json:"rootFolderId" default:"d0"` // Fill in for rclone to use a non root folder as its starting point. 
+ Hostname string `json:"hostname" default:"api.pcloud.com" example:"api.pcloud.com"` // Hostname to connect to. + Username string `json:"username"` // Your pcloud username. + Password string `json:"password"` // Your pcloud password. + Description string `json:"description"` // Description of the remote. } type createPcloudStorageRequest struct { @@ -1185,14 +1225,15 @@ type createPcloudStorageRequest struct { func createPcloudStorage() {} type premiumizemeConfig struct { - ClientId string `json:"clientId"` // OAuth Client Id. - ClientSecret string `json:"clientSecret"` // OAuth Client Secret. - Token string `json:"token"` // OAuth Access Token as a JSON blob. - AuthUrl string `json:"authUrl"` // Auth server URL. - TokenUrl string `json:"tokenUrl"` // Token server url. - ApiKey string `json:"apiKey"` // API Key. - Encoding string `json:"encoding" default:"Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot"` // The encoding for the backend. - Description string `json:"description"` // Description of the remote. + ClientId string `json:"clientId"` // OAuth Client Id. + ClientSecret string `json:"clientSecret"` // OAuth Client Secret. + Token string `json:"token"` // OAuth Access Token as a JSON blob. + AuthUrl string `json:"authUrl"` // Auth server URL. + TokenUrl string `json:"tokenUrl"` // Token server url. + ClientCredentials bool `json:"clientCredentials" default:"false"` // Use client credentials OAuth flow. + ApiKey string `json:"apiKey"` // API Key. + Encoding string `json:"encoding" default:"Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot"` // The encoding for the backend. + Description string `json:"description"` // Description of the remote. } type createPremiumizemeStorageRequest struct { @@ -1215,13 +1256,14 @@ type createPremiumizemeStorageRequest struct { func createPremiumizemeStorage() {} type putioConfig struct { - ClientId string `json:"clientId"` // OAuth Client Id. - ClientSecret string `json:"clientSecret"` // OAuth Client Secret. 
- Token string `json:"token"` // OAuth Access Token as a JSON blob. - AuthUrl string `json:"authUrl"` // Auth server URL. - TokenUrl string `json:"tokenUrl"` // Token server url. - Encoding string `json:"encoding" default:"Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot"` // The encoding for the backend. - Description string `json:"description"` // Description of the remote. + ClientId string `json:"clientId"` // OAuth Client Id. + ClientSecret string `json:"clientSecret"` // OAuth Client Secret. + Token string `json:"token"` // OAuth Access Token as a JSON blob. + AuthUrl string `json:"authUrl"` // Auth server URL. + TokenUrl string `json:"tokenUrl"` // Token server url. + ClientCredentials bool `json:"clientCredentials" default:"false"` // Use client credentials OAuth flow. + Encoding string `json:"encoding" default:"Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot"` // The encoding for the backend. + Description string `json:"description"` // Description of the remote. } type createPutioStorageRequest struct { @@ -1277,63 +1319,72 @@ type createQingstorStorageRequest struct { func createQingstorStorage() {} type s3AWSConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. - SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). - Region string `json:"region" example:"us-east-1"` // Region to connect to. - Endpoint string `json:"endpoint"` // Endpoint for S3 API. - LocationConstraint string `json:"locationConstraint" example:""` // Location constraint - must be set to match the Region. - Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. 
- RequesterPays bool `json:"requesterPays" default:"false"` // Enables requester pays option when interacting with S3 bucket. - ServerSideEncryption string `json:"serverSideEncryption" example:""` // The server-side encryption algorithm used when storing this object in S3. - SseCustomerAlgorithm string `json:"sseCustomerAlgorithm" example:""` // If using SSE-C, the server-side encryption algorithm used when storing this object in S3. - SseKmsKeyId string `json:"sseKmsKeyId" example:""` // If using KMS ID you must provide the ARN of Key. - SseCustomerKey string `json:"sseCustomerKey" example:""` // To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. - SseCustomerKeyBase64 string `json:"sseCustomerKeyBase64" example:""` // If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. - SseCustomerKeyMd5 string `json:"sseCustomerKeyMd5" example:""` // If using SSE-C you may provide the secret encryption key MD5 checksum (optional). - StorageClass string `json:"storageClass" example:""` // The storage class to use when storing new objects in S3. - UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. - ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. - MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. - CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. - DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. - SessionToken string `json:"sessionToken"` // An AWS session token. 
- UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. - ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. - V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. - UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). - UseAccelerateEndpoint bool `json:"useAccelerateEndpoint" default:"false"` // If true use the AWS S3 accelerated endpoint. - LeavePartsOnError bool `json:"leavePartsOnError" default:"false"` // If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). - ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. - ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. 
- DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. - DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. - VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. - VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. - Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. - UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. - NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - StsEndpoint string `json:"stsEndpoint"` // Endpoint for STS (deprecated). - UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. - UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. - SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK - Description string `json:"description"` // Description of the remote. 
+ EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region" example:"us-east-1"` // Region to connect to. + Endpoint string `json:"endpoint"` // Endpoint for S3 API. + LocationConstraint string `json:"locationConstraint" example:""` // Location constraint - must be set to match the Region. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + RequesterPays bool `json:"requesterPays" default:"false"` // Enables requester pays option when interacting with S3 bucket. + ServerSideEncryption string `json:"serverSideEncryption"` // The server-side encryption algorithm used when storing this object in S3. + SseCustomerAlgorithm string `json:"sseCustomerAlgorithm" example:""` // If using SSE-C, the server-side encryption algorithm used when storing this object in S3. + SseKmsKeyId string `json:"sseKmsKeyId" example:""` // If using KMS ID you must provide the ARN of Key. + SseCustomerKey string `json:"sseCustomerKey" example:""` // To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. + SseCustomerKeyBase64 string `json:"sseCustomerKeyBase64" example:""` // If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. + SseCustomerKeyMd5 string `json:"sseCustomerKeyMd5" example:""` // If using SSE-C you may provide the secret encryption key MD5 checksum (optional). + StorageClass string `json:"storageClass" example:"REDUCED_REDUNDANCY"` // The storage class to use when storing new objects in S3. 
+ UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseAccelerateEndpoint bool `json:"useAccelerateEndpoint" default:"false"` // If true use the AWS S3 accelerated endpoint. + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + LeavePartsOnError bool `json:"leavePartsOnError" default:"false"` // If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. 
+ ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. 
+ Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + StsEndpoint string `json:"stsEndpoint"` // Endpoint for STS (deprecated). + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. + DirectoryBucket bool `json:"directoryBucket" default:"false"` // Set to use AWS Directory Buckets + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3AWSStorageRequest struct { @@ -1356,51 +1407,59 @@ type createS3AWSStorageRequest struct { func createS3AWSStorage() {} type s3AlibabaConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. 
- SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). - Endpoint string `json:"endpoint" example:"oss-accelerate.aliyuncs.com"` // Endpoint for OSS API. - Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. - StorageClass string `json:"storageClass" example:""` // The storage class to use when storing new objects in OSS. - UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. - ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. - MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. - CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. - DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. - SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. - ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. - V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. - UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). - ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. 
- ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. - DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. - DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. - VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. - VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. 
- Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. - UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. - NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. - UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. - SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK - Description string `json:"description"` // Description of the remote. + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Endpoint string `json:"endpoint" example:"oss-accelerate.aliyuncs.com"` // Endpoint for S3 API. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + StorageClass string `json:"storageClass"` // The storage class to use when storing new objects in S3. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. 
+ DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. 
+ Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. 
+ NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3AlibabaStorageRequest struct { @@ -1423,52 +1482,60 @@ type createS3AlibabaStorageRequest struct { func createS3AlibabaStorage() {} type s3ArvanCloudConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. - SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). - Endpoint string `json:"endpoint" example:"s3.ir-thr-at1.arvanstorage.ir"` // Endpoint for Arvan Cloud Object Storage (AOS) API. - LocationConstraint string `json:"locationConstraint" example:"ir-thr-at1"` // Location constraint - must match endpoint. - Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. - StorageClass string `json:"storageClass" example:"STANDARD"` // The storage class to use when storing new objects in ArvanCloud. - UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. 
- ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. - MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. - CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. - DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. - SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. - ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. - V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. - UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). - ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. - ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. 
- MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. - DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. - DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. - VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. - VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. - Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. - UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. - NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. 
- UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. - SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK - Description string `json:"description"` // Description of the remote. + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Endpoint string `json:"endpoint" example:"s3.ir-thr-at1.arvanstorage.ir"` // Endpoint for S3 API. + LocationConstraint string `json:"locationConstraint" example:"ir-thr-at1"` // Location constraint - must be set to match the Region. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + StorageClass string `json:"storageClass"` // The storage class to use when storing new objects in S3. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. 
+ RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. 
+ DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. 
+ SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3ArvanCloudStorageRequest struct { @@ -1490,59 +1557,142 @@ type createS3ArvanCloudStorageRequest struct { // @Router /storage/s3/arvancloud [post] func createS3ArvanCloudStorage() {} +type s3BizflyCloudConfig struct { + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region" example:"hn"` // Region to connect to. + Endpoint string `json:"endpoint" example:"hn.ss.bfcplatform.vn"` // Endpoint for S3 API. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. 
+ RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. 
(no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. 
+ UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. +} + +type createS3BizflyCloudStorageRequest struct { + Name string `json:"name" example:"my-storage"` // Name of the storage, must be unique + Path string `json:"path"` // Path of the storage + Config s3BizflyCloudConfig `json:"config"` // config for the storage + ClientConfig model.ClientConfig `json:"clientConfig"` // config for underlying HTTP client +} + +// @ID CreateS3BizflyCloudStorage +// @Summary Create S3 storage with BizflyCloud - Bizfly Cloud Simple Storage +// @Tags Storage +// @Accept json +// @Produce json +// @Success 200 {object} model.Storage +// @Failure 400 {object} api.HTTPError +// @Failure 500 {object} api.HTTPError +// @Param request body createS3BizflyCloudStorageRequest true "Request body" +// @Router /storage/s3/bizflycloud [post] +func createS3BizflyCloudStorage() {} + type s3CephConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. - SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). - Region string `json:"region" example:""` // Region to connect to. - Endpoint string `json:"endpoint"` // Endpoint for S3 API. - LocationConstraint string `json:"locationConstraint"` // Location constraint - must be set to match the Region. - Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. 
- ServerSideEncryption string `json:"serverSideEncryption" example:""` // The server-side encryption algorithm used when storing this object in S3. - SseCustomerAlgorithm string `json:"sseCustomerAlgorithm" example:""` // If using SSE-C, the server-side encryption algorithm used when storing this object in S3. - SseKmsKeyId string `json:"sseKmsKeyId" example:""` // If using KMS ID you must provide the ARN of Key. - SseCustomerKey string `json:"sseCustomerKey" example:""` // To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. - SseCustomerKeyBase64 string `json:"sseCustomerKeyBase64" example:""` // If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. - SseCustomerKeyMd5 string `json:"sseCustomerKeyMd5" example:""` // If using SSE-C you may provide the secret encryption key MD5 checksum (optional). - UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. - ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. - MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. - CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. - DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. - SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. - ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. - V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. 
- UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). - ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. - ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. - DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
- DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. - VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. - VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. - Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. - UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. - NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. - UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. - SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK - Description string `json:"description"` // Description of the remote. + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. 
+ SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region"` // Region to connect to. + Endpoint string `json:"endpoint"` // Endpoint for S3 API. + LocationConstraint string `json:"locationConstraint"` // Location constraint - must be set to match the Region. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + ServerSideEncryption string `json:"serverSideEncryption"` // The server-side encryption algorithm used when storing this object in S3. + SseCustomerAlgorithm string `json:"sseCustomerAlgorithm" example:""` // If using SSE-C, the server-side encryption algorithm used when storing this object in S3. + SseKmsKeyId string `json:"sseKmsKeyId" example:""` // If using KMS ID you must provide the ARN of Key. + SseCustomerKey string `json:"sseCustomerKey" example:""` // To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. + SseCustomerKeyBase64 string `json:"sseCustomerKeyBase64" example:""` // If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. + SseCustomerKeyMd5 string `json:"sseCustomerKeyMd5" example:""` // If using SSE-C you may provide the secret encryption key MD5 checksum (optional). + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. 
+ SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. 
+ MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. 
+ NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3CephStorageRequest struct { @@ -1565,57 +1715,65 @@ type createS3CephStorageRequest struct { func createS3CephStorage() {} type s3ChinaMobileConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. - SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). - Endpoint string `json:"endpoint" example:"eos-wuxi-1.cmecloud.cn"` // Endpoint for China Mobile Ecloud Elastic Object Storage (EOS) API. - LocationConstraint string `json:"locationConstraint" example:"wuxi1"` // Location constraint - must match endpoint. - Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. - ServerSideEncryption string `json:"serverSideEncryption" example:""` // The server-side encryption algorithm used when storing this object in S3. 
- SseCustomerAlgorithm string `json:"sseCustomerAlgorithm" example:""` // If using SSE-C, the server-side encryption algorithm used when storing this object in S3. - SseCustomerKey string `json:"sseCustomerKey" example:""` // To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. - SseCustomerKeyBase64 string `json:"sseCustomerKeyBase64" example:""` // If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. - SseCustomerKeyMd5 string `json:"sseCustomerKeyMd5" example:""` // If using SSE-C you may provide the secret encryption key MD5 checksum (optional). - StorageClass string `json:"storageClass" example:""` // The storage class to use when storing new objects in ChinaMobile. - UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. - ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. - MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. - CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. - DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. - SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. - ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. - V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. - UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). 
- ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). - ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. - ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. - DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. - DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. 
- VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. - VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. - Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. - UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. - NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. - UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. - SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK - Description string `json:"description"` // Description of the remote. + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Endpoint string `json:"endpoint" example:"eos-wuxi-1.cmecloud.cn"` // Endpoint for S3 API. + LocationConstraint string `json:"locationConstraint" example:"wuxi1"` // Location constraint - must be set to match the Region. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + ServerSideEncryption string `json:"serverSideEncryption"` // The server-side encryption algorithm used when storing this object in S3. 
+ SseCustomerAlgorithm string `json:"sseCustomerAlgorithm" example:""` // If using SSE-C, the server-side encryption algorithm used when storing this object in S3. + SseCustomerKey string `json:"sseCustomerKey" example:""` // To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. + SseCustomerKeyBase64 string `json:"sseCustomerKeyBase64" example:""` // If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. + SseCustomerKeyMd5 string `json:"sseCustomerKeyMd5" example:""` // If using SSE-C you may provide the secret encryption key MD5 checksum (optional). + StorageClass string `json:"storageClass"` // The storage class to use when storing new objects in S3. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. 
+ ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
+ DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. 
+ SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3ChinaMobileStorageRequest struct { @@ -1638,50 +1796,57 @@ type createS3ChinaMobileStorageRequest struct { func createS3ChinaMobileStorage() {} type s3CloudflareConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. - SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). - Region string `json:"region" example:"auto"` // Region to connect to. - Endpoint string `json:"endpoint"` // Endpoint for S3 API. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. - UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. - ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. - MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. - CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. - DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. - SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. - ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. - V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. 
- UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). - ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. - ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. - DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
- DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. - VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. - VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. - Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. - UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. - NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. - UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. - SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK - Description string `json:"description"` // Description of the remote. + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. 
+ SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region" example:"auto"` // Region to connect to. + Endpoint string `json:"endpoint"` // Endpoint for S3 API. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). 
+ ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. 
+ VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3CloudflareStorageRequest struct { @@ -1703,53 +1868,136 @@ type createS3CloudflareStorageRequest struct { // @Router /storage/s3/cloudflare [post] func createS3CloudflareStorage() {} +type s3CubbitConfig struct { + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region" example:"eu-west-1"` // Region to connect to. 
+ Endpoint string `json:"endpoint" example:"s3.cubbit.eu"` // Endpoint for S3 API. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. 
+ ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. 
+ Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. 
+} + +type createS3CubbitStorageRequest struct { + Name string `json:"name" example:"my-storage"` // Name of the storage, must be unique + Path string `json:"path"` // Path of the storage + Config s3CubbitConfig `json:"config"` // config for the storage + ClientConfig model.ClientConfig `json:"clientConfig"` // config for underlying HTTP client +} + +// @ID CreateS3CubbitStorage +// @Summary Create S3 storage with Cubbit - Cubbit DS3 Object Storage +// @Tags Storage +// @Accept json +// @Produce json +// @Success 200 {object} model.Storage +// @Failure 400 {object} api.HTTPError +// @Failure 500 {object} api.HTTPError +// @Param request body createS3CubbitStorageRequest true "Request body" +// @Router /storage/s3/cubbit [post] +func createS3CubbitStorage() {} + type s3DigitalOceanConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. - SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). - Region string `json:"region" example:""` // Region to connect to. - Endpoint string `json:"endpoint" example:"syd1.digitaloceanspaces.com"` // Endpoint for S3 API. - LocationConstraint string `json:"locationConstraint"` // Location constraint - must be set to match the Region. - Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. - UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. - ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. - MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. - CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. 
- DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. - SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. - ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. - V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. - UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). - ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. - ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. 
- DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. - DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. - VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. - VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. - Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. - UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. - NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. - UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. - SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK - Description string `json:"description"` // Description of the remote. + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). 
+ AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region"` // Region to connect to. + Endpoint string `json:"endpoint" example:"syd1.digitaloceanspaces.com"` // Endpoint for S3 API. + LocationConstraint string `json:"locationConstraint"` // Location constraint - must be set to match the Region. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. 
+ V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
+ DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. 
+ SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3DigitalOceanStorageRequest struct { @@ -1772,52 +2020,60 @@ type createS3DigitalOceanStorageRequest struct { func createS3DigitalOceanStorage() {} type s3DreamhostConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. - SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). - Region string `json:"region" example:""` // Region to connect to. - Endpoint string `json:"endpoint" example:"objects-us-east-1.dream.io"` // Endpoint for S3 API. - LocationConstraint string `json:"locationConstraint"` // Location constraint - must be set to match the Region. - Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. - UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. - ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. - MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. - CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. - DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. - SessionToken string `json:"sessionToken"` // An AWS session token. 
- UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. - ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. - V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. - UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). - ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. - ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. - DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
- DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. - VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. - VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. - Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. - UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. - NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. - UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. - SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK - Description string `json:"description"` // Description of the remote. + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. 
+ SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region"` // Region to connect to. + Endpoint string `json:"endpoint" example:"objects-us-east-1.dream.io"` // Endpoint for S3 API. + LocationConstraint string `json:"locationConstraint"` // Location constraint - must be set to match the Region. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. 
+ UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
+ DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. 
+ SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3DreamhostStorageRequest struct { @@ -1839,53 +2095,284 @@ type createS3DreamhostStorageRequest struct { // @Router /storage/s3/dreamhost [post] func createS3DreamhostStorage() {} +type s3ExabaConfig struct { + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region"` // Region to connect to. + Endpoint string `json:"endpoint"` // Endpoint for S3 API. + LocationConstraint string `json:"locationConstraint"` // Location constraint - must be set to match the Region. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. 
+ RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. 
+ DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. 
+ SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. +} + +type createS3ExabaStorageRequest struct { + Name string `json:"name" example:"my-storage"` // Name of the storage, must be unique + Path string `json:"path"` // Path of the storage + Config s3ExabaConfig `json:"config"` // config for the storage + ClientConfig model.ClientConfig `json:"clientConfig"` // config for underlying HTTP client +} + +// @ID CreateS3ExabaStorage +// @Summary Create S3 storage with Exaba - Exaba Object Storage +// @Tags Storage +// @Accept json +// @Produce json +// @Success 200 {object} model.Storage +// @Failure 400 {object} api.HTTPError +// @Failure 500 {object} api.HTTPError +// @Param request body createS3ExabaStorageRequest true "Request body" +// @Router /storage/s3/exaba [post] +func createS3ExabaStorage() {} + +type s3FileLuConfig struct { + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region" example:"global"` // Region to connect to. + Endpoint string `json:"endpoint" example:"s5lu.com"` // Endpoint for S3 API. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. 
+ MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. 
+ NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. 
+ MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. +} + +type createS3FileLuStorageRequest struct { + Name string `json:"name" example:"my-storage"` // Name of the storage, must be unique + Path string `json:"path"` // Path of the storage + Config s3FileLuConfig `json:"config"` // config for the storage + ClientConfig model.ClientConfig `json:"clientConfig"` // config for underlying HTTP client +} + +// @ID CreateS3FileLuStorage +// @Summary Create S3 storage with FileLu - FileLu S5 (S3-Compatible Object Storage) +// @Tags Storage +// @Accept json +// @Produce json +// @Success 200 {object} model.Storage +// @Failure 400 {object} api.HTTPError +// @Failure 500 {object} api.HTTPError +// @Param request body createS3FileLuStorageRequest true "Request body" +// @Router /storage/s3/filelu [post] +func createS3FileLuStorage() {} + +type s3FlashBladeConfig struct { + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). 
+ AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Endpoint string `json:"endpoint"` // Endpoint for S3 API. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). 
+ ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. 
+ VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. 
+} + +type createS3FlashBladeStorageRequest struct { + Name string `json:"name" example:"my-storage"` // Name of the storage, must be unique + Path string `json:"path"` // Path of the storage + Config s3FlashBladeConfig `json:"config"` // config for the storage + ClientConfig model.ClientConfig `json:"clientConfig"` // config for underlying HTTP client +} + +// @ID CreateS3FlashBladeStorage +// @Summary Create S3 storage with FlashBlade - Pure Storage FlashBlade Object Storage +// @Tags Storage +// @Accept json +// @Produce json +// @Success 200 {object} model.Storage +// @Failure 400 {object} api.HTTPError +// @Failure 500 {object} api.HTTPError +// @Param request body createS3FlashBladeStorageRequest true "Request body" +// @Router /storage/s3/flashblade [post] +func createS3FlashBladeStorage() {} + type s3GCSConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. - SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). - Region string `json:"region" example:""` // Region to connect to. - Endpoint string `json:"endpoint" example:"https://storage.googleapis.com"` // Endpoint for Google Cloud Storage. - LocationConstraint string `json:"locationConstraint"` // Location constraint - must be set to match the Region. - Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. - UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. - ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. - MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. 
- CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. - DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. - SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. - ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. - V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. - UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). - ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. - ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. 
(no longer used) - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. - DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. - DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. - VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. - VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. - Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. - UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. - NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. - UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. - SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK - Description string `json:"description"` // Description of the remote. 
+ EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region"` // Region to connect to. + Endpoint string `json:"endpoint" example:"https://storage.googleapis.com"` // Endpoint for S3 API. + LocationConstraint string `json:"locationConstraint"` // Location constraint - must be set to match the Region. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. 
+ ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
+ DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. 
+ SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3GCSStorageRequest struct { @@ -1907,52 +2394,136 @@ type createS3GCSStorageRequest struct { // @Router /storage/s3/gcs [post] func createS3GCSStorage() {} +type s3HetznerConfig struct { + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region" example:"hel1"` // Region to connect to. + Endpoint string `json:"endpoint" example:"hel1.your-objectstorage.com"` // Endpoint for S3 API. + LocationConstraint string `json:"locationConstraint"` // Location constraint - must be set to match the Region. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. 
+ RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. 
+ DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. 
+ SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. +} + +type createS3HetznerStorageRequest struct { + Name string `json:"name" example:"my-storage"` // Name of the storage, must be unique + Path string `json:"path"` // Path of the storage + Config s3HetznerConfig `json:"config"` // config for the storage + ClientConfig model.ClientConfig `json:"clientConfig"` // config for underlying HTTP client +} + +// @ID CreateS3HetznerStorage +// @Summary Create S3 storage with Hetzner - Hetzner Object Storage +// @Tags Storage +// @Accept json +// @Produce json +// @Success 200 {object} model.Storage +// @Failure 400 {object} api.HTTPError +// @Failure 500 {object} api.HTTPError +// @Param request body createS3HetznerStorageRequest true "Request body" +// @Router /storage/s3/hetzner [post] +func createS3HetznerStorage() {} + type s3HuaweiOBSConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. - SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). - Region string `json:"region" example:"af-south-1"` // Region to connect to. - the location where your bucket will be created and your data stored. Need bo be same with your endpoint. - Endpoint string `json:"endpoint" example:"obs.af-south-1.myhuaweicloud.com"` // Endpoint for OBS API. - Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. 
- UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. - ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. - MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. - CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. - DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. - SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. - ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. - V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. - UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). - ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. - ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. 
- MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. - DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. - DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. - VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. - VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. - Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. - UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. - NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. 
- UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. - SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK - Description string `json:"description"` // Description of the remote. + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region" example:"af-south-1"` // Region to connect to. + Endpoint string `json:"endpoint" example:"obs.af-south-1.myhuaweicloud.com"` // Endpoint for S3 API. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. 
+ RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
+ DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. 
+ SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3HuaweiOBSStorageRequest struct { @@ -1975,52 +2546,62 @@ type createS3HuaweiOBSStorageRequest struct { func createS3HuaweiOBSStorage() {} type s3IBMCOSConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. - SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). - Region string `json:"region" example:""` // Region to connect to. - Endpoint string `json:"endpoint" example:"s3.us.cloud-object-storage.appdomain.cloud"` // Endpoint for IBM COS S3 API. - LocationConstraint string `json:"locationConstraint" example:"us-standard"` // Location constraint - must match endpoint when using IBM Cloud Public. - Acl string `json:"acl" example:"private"` // Canned ACL used when creating buckets and storing or copying objects. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. - UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. - ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. - MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. - CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. - DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. - SessionToken string `json:"sessionToken"` // An AWS session token. 
- UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. - ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. - V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. - UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). - ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. - ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. - DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
- DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. - VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. - VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. - Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. - UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. - NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. - UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. - SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK - Description string `json:"description"` // Description of the remote. + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. 
+ SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region"` // Region to connect to. + Endpoint string `json:"endpoint" example:"s3.us.cloud-object-storage.appdomain.cloud"` // Endpoint for S3 API. + LocationConstraint string `json:"locationConstraint" example:"us-standard"` // Location constraint - must be set to match the Region. + Acl string `json:"acl" example:"private"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. 
+ V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
+ DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. 
+ SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + IbmApiKey string `json:"ibmApiKey"` // IBM API Key to be used to obtain IAM token + IbmResourceInstanceId string `json:"ibmResourceInstanceId"` // IBM service instance id + Description string `json:"description"` // Description of the remote. } type createS3IBMCOSStorageRequest struct { @@ -2043,49 +2624,57 @@ type createS3IBMCOSStorageRequest struct { func createS3IBMCOSStorage() {} type s3IDriveConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. - SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). - Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. - UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. - ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. - MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. - CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. - DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. - SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. 
- ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. - V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. - UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). - ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. - ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. - DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
- DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. - VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. - VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. - Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. - UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. - NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. - UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. - SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK - Description string `json:"description"` // Description of the remote. + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. 
+ SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. 
+ ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. 
+ Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3IDriveStorageRequest struct { @@ -2108,51 +2697,59 @@ type createS3IDriveStorageRequest struct { func createS3IDriveStorage() {} type s3IONOSConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. - SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). 
- Region string `json:"region" example:"de"` // Region where your bucket will be created and your data stored. - Endpoint string `json:"endpoint" example:"s3-eu-central-1.ionoscloud.com"` // Endpoint for IONOS S3 Object Storage. - Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. - UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. - ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. - MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. - CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. - DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. - SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. - ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. - V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. - UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). - ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. 
- ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. - DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. - DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. - VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. - VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. 
- Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. - UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. - NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. - UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. - SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK - Description string `json:"description"` // Description of the remote. + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region" example:"eu-central-2"` // Region to connect to. + Endpoint string `json:"endpoint" example:"s3-eu-central-1.ionoscloud.com"` // Endpoint for S3 API. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. 
+ DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. 
+ Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. 
+ NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3IONOSStorageRequest struct { @@ -2174,52 +2771,135 @@ type createS3IONOSStorageRequest struct { // @Router /storage/s3/ionos [post] func createS3IONOSStorage() {} +type s3IntercoloConfig struct { + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region" example:"de-fra"` // Region to connect to. + Endpoint string `json:"endpoint" example:"de-fra.i3storage.com"` // Endpoint for S3 API. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. 
+ CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. 
+ NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. 
+ UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. +} + +type createS3IntercoloStorageRequest struct { + Name string `json:"name" example:"my-storage"` // Name of the storage, must be unique + Path string `json:"path"` // Path of the storage + Config s3IntercoloConfig `json:"config"` // config for the storage + ClientConfig model.ClientConfig `json:"clientConfig"` // config for underlying HTTP client +} + +// @ID CreateS3IntercoloStorage +// @Summary Create S3 storage with Intercolo - Intercolo Object Storage +// @Tags Storage +// @Accept json +// @Produce json +// @Success 200 {object} model.Storage +// @Failure 400 {object} api.HTTPError +// @Failure 500 {object} api.HTTPError +// @Param request body createS3IntercoloStorageRequest true "Request body" +// @Router /storage/s3/intercolo [post] +func createS3IntercoloStorage() {} + type s3LeviiaConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. 
- SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). - Region string `json:"region" example:""` // Region to connect to. - Endpoint string `json:"endpoint"` // Endpoint for S3 API. - Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. - UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. - ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. - MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. - CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. - DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. - SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. - ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. - V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. - UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). - ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. 
- ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. - DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. - DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. - VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. - VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. 
- Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. - UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. - NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. - UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. - SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK - Description string `json:"description"` // Description of the remote. + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region"` // Region to connect to. + Endpoint string `json:"endpoint" example:"s3.leviia.com"` // Endpoint for S3 API. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. 
+ DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. 
+ Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. 
+ NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3LeviiaStorageRequest struct { @@ -2242,51 +2922,59 @@ type createS3LeviiaStorageRequest struct { func createS3LeviiaStorage() {} type s3LiaraConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. - SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). - Endpoint string `json:"endpoint" example:"storage.iran.liara.space"` // Endpoint for Liara Object Storage API. - Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. - StorageClass string `json:"storageClass" example:"STANDARD"` // The storage class to use when storing new objects in Liara - UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. - ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. - MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. 
- CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. - DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. - SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. - ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. - V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. - UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). - ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. - ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. 
(no longer used) - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. - DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. - DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. - VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. - VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. - Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. - UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. - NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. - UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. - SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK - Description string `json:"description"` // Description of the remote. 
+ EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Endpoint string `json:"endpoint" example:"storage.iran.liara.space"` // Endpoint for S3 API. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + StorageClass string `json:"storageClass"` // The storage class to use when storing new objects in S3. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. 
+ ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
+ DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. 
+ SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3LiaraStorageRequest struct { @@ -2309,50 +2997,58 @@ type createS3LiaraStorageRequest struct { func createS3LiaraStorage() {} type s3LinodeConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. - SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). - Endpoint string `json:"endpoint" example:"us-southeast-1.linodeobjects.com"` // Endpoint for Linode Object Storage API. - Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. - UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. - ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. - MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. - CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. - DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. - SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. - ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. 
- V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. - UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). - ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. - ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. - DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
- DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. - VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. - VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. - Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. - UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. - NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. - UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. - SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK - Description string `json:"description"` // Description of the remote. + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. 
+ SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Endpoint string `json:"endpoint" example:"nl-ams-1.linodeobjects.com"` // Endpoint for S3 API. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). 
+ UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
+ DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. 
+ SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3LinodeStorageRequest struct { @@ -2375,52 +3071,60 @@ type createS3LinodeStorageRequest struct { func createS3LinodeStorage() {} type s3LyveCloudConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. - SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). - Region string `json:"region" example:""` // Region to connect to. - Endpoint string `json:"endpoint" example:"s3.us-east-1.lyvecloud.seagate.com"` // Endpoint for S3 API. - LocationConstraint string `json:"locationConstraint"` // Location constraint - must be set to match the Region. - Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. - UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. - ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. - MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. - CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. - DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. - SessionToken string `json:"sessionToken"` // An AWS session token. 
- UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. - ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. - V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. - UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). - ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. - ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. - DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
- DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. - VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. - VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. - Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. - UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. - NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. - UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. - SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK - Description string `json:"description"` // Description of the remote. + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. 
+ SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region"` // Region to connect to. + Endpoint string `json:"endpoint" example:"s3.us-west-1.{account_name}.lyve.seagate.com"` // Endpoint for S3 API. + LocationConstraint string `json:"locationConstraint"` // Location constraint - must be set to match the Region. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. 
+ UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
+ DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. 
+ SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3LyveCloudStorageRequest struct { @@ -2443,51 +3147,59 @@ type createS3LyveCloudStorageRequest struct { func createS3LyveCloudStorage() {} type s3MagaluConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. - SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). - Endpoint string `json:"endpoint" example:"br-se1.magaluobjects.com"` // Endpoint for S3 API. - Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. - StorageClass string `json:"storageClass" example:"STANDARD"` // The storage class to use when storing new objects in Magalu. - UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. - ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. - MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. - CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. - DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. - SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. 
- ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. - V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. - UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). - ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. - ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. - DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
- DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. - VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. - VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. - Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. - UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. - NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. - UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. - SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK - Description string `json:"description"` // Description of the remote. + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. 
+ SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Endpoint string `json:"endpoint" example:"br-se1.magaluobjects.com"` // Endpoint for S3 API. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + StorageClass string `json:"storageClass"` // The storage class to use when storing new objects in S3. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. 
+ UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
+ DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. 
+ SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3MagaluStorageRequest struct { @@ -2509,59 +3221,140 @@ type createS3MagaluStorageRequest struct { // @Router /storage/s3/magalu [post] func createS3MagaluStorage() {} +type s3MegaConfig struct { + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Endpoint string `json:"endpoint" example:"s3.eu-central-1.s4.mega.io"` // Endpoint for S3 API. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. 
+ UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
+ DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. 
+ SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. +} + +type createS3MegaStorageRequest struct { + Name string `json:"name" example:"my-storage"` // Name of the storage, must be unique + Path string `json:"path"` // Path of the storage + Config s3MegaConfig `json:"config"` // config for the storage + ClientConfig model.ClientConfig `json:"clientConfig"` // config for underlying HTTP client +} + +// @ID CreateS3MegaStorage +// @Summary Create S3 storage with Mega - MEGA S4 Object Storage +// @Tags Storage +// @Accept json +// @Produce json +// @Success 200 {object} model.Storage +// @Failure 400 {object} api.HTTPError +// @Failure 500 {object} api.HTTPError +// @Param request body createS3MegaStorageRequest true "Request body" +// @Router /storage/s3/mega [post] +func createS3MegaStorage() {} + type s3MinioConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. - SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). - Region string `json:"region" example:""` // Region to connect to. - Endpoint string `json:"endpoint"` // Endpoint for S3 API. - LocationConstraint string `json:"locationConstraint"` // Location constraint - must be set to match the Region. - Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. - ServerSideEncryption string `json:"serverSideEncryption" example:""` // The server-side encryption algorithm used when storing this object in S3. - SseCustomerAlgorithm string `json:"sseCustomerAlgorithm" example:""` // If using SSE-C, the server-side encryption algorithm used when storing this object in S3. 
- SseKmsKeyId string `json:"sseKmsKeyId" example:""` // If using KMS ID you must provide the ARN of Key. - SseCustomerKey string `json:"sseCustomerKey" example:""` // To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. - SseCustomerKeyBase64 string `json:"sseCustomerKeyBase64" example:""` // If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. - SseCustomerKeyMd5 string `json:"sseCustomerKeyMd5" example:""` // If using SSE-C you may provide the secret encryption key MD5 checksum (optional). - UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. - ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. - MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. - CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. - DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. - SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. - ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. - V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. - UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). 
- ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. - ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. - DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. - DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. - VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. - VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. 
- Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. - UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. - NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. - UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. - SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK - Description string `json:"description"` // Description of the remote. + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region"` // Region to connect to. + Endpoint string `json:"endpoint"` // Endpoint for S3 API. + LocationConstraint string `json:"locationConstraint"` // Location constraint - must be set to match the Region. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + ServerSideEncryption string `json:"serverSideEncryption"` // The server-side encryption algorithm used when storing this object in S3. + SseCustomerAlgorithm string `json:"sseCustomerAlgorithm" example:""` // If using SSE-C, the server-side encryption algorithm used when storing this object in S3. 
+ SseKmsKeyId string `json:"sseKmsKeyId" example:""` // If using KMS ID you must provide the ARN of Key. + SseCustomerKey string `json:"sseCustomerKey" example:""` // To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. + SseCustomerKeyBase64 string `json:"sseCustomerKeyBase64" example:""` // If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. + SseCustomerKeyMd5 string `json:"sseCustomerKeyMd5" example:""` // If using SSE-C you may provide the secret encryption key MD5 checksum (optional). + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. 
+ V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
+ DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. 
+ SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3MinioStorageRequest struct { @@ -2584,52 +3377,60 @@ type createS3MinioStorageRequest struct { func createS3MinioStorage() {} type s3NeteaseConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. - SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). - Region string `json:"region" example:""` // Region to connect to. - Endpoint string `json:"endpoint"` // Endpoint for S3 API. - LocationConstraint string `json:"locationConstraint"` // Location constraint - must be set to match the Region. - Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. - UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. - ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. - MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. - CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. - DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. - SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. 
- ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. - V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. - UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). - ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. - ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. - DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
- DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. - VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. - VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. - Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. - UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. - NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. - UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. - SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK - Description string `json:"description"` // Description of the remote. + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. 
+ SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region"` // Region to connect to. + Endpoint string `json:"endpoint"` // Endpoint for S3 API. + LocationConstraint string `json:"locationConstraint"` // Location constraint - must be set to match the Region. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. 
+ UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
+ DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. 
+ SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3NeteaseStorageRequest struct { @@ -2651,53 +3452,136 @@ type createS3NeteaseStorageRequest struct { // @Router /storage/s3/netease [post] func createS3NeteaseStorage() {} +type s3OVHcloudConfig struct { + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region" example:"gra"` // Region to connect to. + Endpoint string `json:"endpoint" example:"s3.gra.io.cloud.ovh.net"` // Endpoint for S3 API. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. 
+ RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
+ DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. 
+ SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. +} + +type createS3OVHcloudStorageRequest struct { + Name string `json:"name" example:"my-storage"` // Name of the storage, must be unique + Path string `json:"path"` // Path of the storage + Config s3OVHcloudConfig `json:"config"` // config for the storage + ClientConfig model.ClientConfig `json:"clientConfig"` // config for underlying HTTP client +} + +// @ID CreateS3OVHcloudStorage +// @Summary Create S3 storage with OVHcloud - OVHcloud Object Storage +// @Tags Storage +// @Accept json +// @Produce json +// @Success 200 {object} model.Storage +// @Failure 400 {object} api.HTTPError +// @Failure 500 {object} api.HTTPError +// @Param request body createS3OVHcloudStorageRequest true "Request body" +// @Router /storage/s3/ovhcloud [post] +func createS3OVHcloudStorage() {} + type s3OtherConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. - SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). - Region string `json:"region" example:""` // Region to connect to. - Endpoint string `json:"endpoint"` // Endpoint for S3 API. - LocationConstraint string `json:"locationConstraint"` // Location constraint - must be set to match the Region. - Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. - UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. - ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. 
- MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. - CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. - DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. - SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. - ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. - V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. - UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). - ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. - ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. 
(no longer used) - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. - DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. - DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. - VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. - VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. - Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. - UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. - NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. - UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. 
- SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK - Description string `json:"description"` // Description of the remote. + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region"` // Region to connect to. + Endpoint string `json:"endpoint"` // Endpoint for S3 API. + LocationConstraint string `json:"locationConstraint"` // Location constraint - must be set to match the Region. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. 
+ UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
+ DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. 
+ SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3OtherStorageRequest struct { @@ -2719,52 +3603,135 @@ type createS3OtherStorageRequest struct { // @Router /storage/s3/other [post] func createS3OtherStorage() {} +type s3OutscaleConfig struct { + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region" example:"eu-west-2"` // Region to connect to. + Endpoint string `json:"endpoint" example:"oos.eu-west-2.outscale.com"` // Endpoint for S3 API. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. 
+ RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
+ DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. 
+ SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. +} + +type createS3OutscaleStorageRequest struct { + Name string `json:"name" example:"my-storage"` // Name of the storage, must be unique + Path string `json:"path"` // Path of the storage + Config s3OutscaleConfig `json:"config"` // config for the storage + ClientConfig model.ClientConfig `json:"clientConfig"` // config for underlying HTTP client +} + +// @ID CreateS3OutscaleStorage +// @Summary Create S3 storage with Outscale - OUTSCALE Object Storage (OOS) +// @Tags Storage +// @Accept json +// @Produce json +// @Success 200 {object} model.Storage +// @Failure 400 {object} api.HTTPError +// @Failure 500 {object} api.HTTPError +// @Param request body createS3OutscaleStorageRequest true "Request body" +// @Router /storage/s3/outscale [post] +func createS3OutscaleStorage() {} + type s3PetaboxConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. - SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). - Region string `json:"region" example:"us-east-1"` // Region where your bucket will be created and your data stored. - Endpoint string `json:"endpoint" example:"s3.petabox.io"` // Endpoint for Petabox S3 Object Storage. - Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. - UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. - ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. 
- MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. - CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. - DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. - SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. - ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. - V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. - UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). - ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. - ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. 
(no longer used) - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. - DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. - DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. - VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. - VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. - Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. - UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. - NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. - UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. 
- SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK - Description string `json:"description"` // Description of the remote. + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region" example:"eu-central-1"` // Region to connect to. + Endpoint string `json:"endpoint" example:"s3.petabox.io"` // Endpoint for S3 API. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. 
+ UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
+ DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. 
+ SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3PetaboxStorageRequest struct { @@ -2787,53 +3754,61 @@ type createS3PetaboxStorageRequest struct { func createS3PetaboxStorage() {} type s3QiniuConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. - SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). - Region string `json:"region" example:"cn-east-1"` // Region to connect to. - Endpoint string `json:"endpoint" example:"s3-cn-east-1.qiniucs.com"` // Endpoint for Qiniu Object Storage. - LocationConstraint string `json:"locationConstraint" example:"cn-east-1"` // Location constraint - must be set to match the Region. - Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. - StorageClass string `json:"storageClass" example:"STANDARD"` // The storage class to use when storing new objects in Qiniu. - UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. - ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. - MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. - CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. - DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. 
- SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. - ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. - V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. - UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). - ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. - ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. - DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
- DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. - VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. - VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. - Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. - UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. - NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. - UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. - SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK - Description string `json:"description"` // Description of the remote. + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. 
+ SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region" example:"cn-east-1"` // Region to connect to. + Endpoint string `json:"endpoint" example:"s3-cn-east-1.qiniucs.com"` // Endpoint for S3 API. + LocationConstraint string `json:"locationConstraint" example:"cn-east-1"` // Location constraint - must be set to match the Region. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + StorageClass string `json:"storageClass" example:"LINE"` // The storage class to use when storing new objects in S3. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. 
+ ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
+ DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. 
+ SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3QiniuStorageRequest struct { @@ -2855,53 +3830,135 @@ type createS3QiniuStorageRequest struct { // @Router /storage/s3/qiniu [post] func createS3QiniuStorage() {} +type s3RabataConfig struct { + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region" example:"eu-west-1"` // Region to connect to. + Endpoint string `json:"endpoint" example:"s3.us-east-1.rabata.io"` // Endpoint for S3 API. + LocationConstraint string `json:"locationConstraint" example:"us-east-1"` // Location constraint - must be set to match the Region. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. 
+ RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
+ DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. 
+ SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. +} + +type createS3RabataStorageRequest struct { + Name string `json:"name" example:"my-storage"` // Name of the storage, must be unique + Path string `json:"path"` // Path of the storage + Config s3RabataConfig `json:"config"` // config for the storage + ClientConfig model.ClientConfig `json:"clientConfig"` // config for underlying HTTP client +} + +// @ID CreateS3RabataStorage +// @Summary Create S3 storage with Rabata - Rabata Cloud Storage +// @Tags Storage +// @Accept json +// @Produce json +// @Success 200 {object} model.Storage +// @Failure 400 {object} api.HTTPError +// @Failure 500 {object} api.HTTPError +// @Param request body createS3RabataStorageRequest true "Request body" +// @Router /storage/s3/rabata [post] +func createS3RabataStorage() {} + type s3RackCorpConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. - SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). - Region string `json:"region" example:"global"` // region - the location where your bucket will be created and your data stored. - Endpoint string `json:"endpoint" example:"s3.rackcorp.com"` // Endpoint for RackCorp Object Storage. - LocationConstraint string `json:"locationConstraint" example:"global"` // Location constraint - the location where your bucket will be located and your data stored. - Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. - UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. 
- ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. - MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. - CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. - DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. - SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. - ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. - V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. - UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). - ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. - ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. 
- MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. - DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. - DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. - VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. - VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. - Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. - UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. - NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. 
- UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. - SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK - Description string `json:"description"` // Description of the remote. + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region" example:"global"` // Region to connect to. + Endpoint string `json:"endpoint" example:"s3.rackcorp.com"` // Endpoint for S3 API. + LocationConstraint string `json:"locationConstraint" example:"global"` // Location constraint - must be set to match the Region. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. 
+ RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
+ DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. 
+ SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3RackCorpStorageRequest struct { @@ -2924,52 +3981,56 @@ type createS3RackCorpStorageRequest struct { func createS3RackCorpStorage() {} type s3RcloneConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. - SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). - Region string `json:"region" example:""` // Region to connect to. - Endpoint string `json:"endpoint"` // Endpoint for S3 API. - LocationConstraint string `json:"locationConstraint"` // Location constraint - must be set to match the Region. - Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. - UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. - ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. - MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. - CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. - DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. - SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. 
- ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. - V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. - UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). - ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. - ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. - DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
- DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. - VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. - VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. - Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. - UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. - NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. - UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. - SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK - Description string `json:"description"` // Description of the remote. + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. 
+ SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Endpoint string `json:"endpoint"` // Endpoint for S3 API. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). 
+ ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. 
+ VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3RcloneStorageRequest struct { @@ -2992,52 +4053,60 @@ type createS3RcloneStorageRequest struct { func createS3RcloneStorage() {} type s3ScalewayConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. - SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). - Region string `json:"region" example:"nl-ams"` // Region to connect to. 
- Endpoint string `json:"endpoint" example:"s3.nl-ams.scw.cloud"` // Endpoint for Scaleway Object Storage. - Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. - StorageClass string `json:"storageClass" example:""` // The storage class to use when storing new objects in S3. - UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. - ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. - MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. - CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. - DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. - SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. - ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. - V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. - UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). - ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. 
- ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. - DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. - DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. - VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. - VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. 
- Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. - UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. - NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. - UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. - SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK - Description string `json:"description"` // Description of the remote. + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region" example:"nl-ams"` // Region to connect to. + Endpoint string `json:"endpoint" example:"s3.nl-ams.scw.cloud"` // Endpoint for S3 API. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + StorageClass string `json:"storageClass" example:""` // The storage class to use when storing new objects in S3. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. 
+ CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. 
+ NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. 
+ UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3ScalewayStorageRequest struct { @@ -3060,52 +4129,60 @@ type createS3ScalewayStorageRequest struct { func createS3ScalewayStorage() {} type s3SeaweedFSConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. - SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). - Region string `json:"region" example:""` // Region to connect to. - Endpoint string `json:"endpoint" example:"localhost:8333"` // Endpoint for S3 API. - LocationConstraint string `json:"locationConstraint"` // Location constraint - must be set to match the Region. - Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. - UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. 
- ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. - MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. - CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. - DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. - SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. - ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. - V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. - UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). - ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. - ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. 
- MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. - DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. - DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. - VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. - VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. - Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. - UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. - NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. 
- UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. - SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK - Description string `json:"description"` // Description of the remote. + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region"` // Region to connect to. + Endpoint string `json:"endpoint" example:"localhost:8333"` // Endpoint for S3 API. + LocationConstraint string `json:"locationConstraint"` // Location constraint - must be set to match the Region. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. 
+ RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
+ DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. 
+ SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3SeaweedFSStorageRequest struct { @@ -3127,52 +4204,279 @@ type createS3SeaweedFSStorageRequest struct { // @Router /storage/s3/seaweedfs [post] func createS3SeaweedFSStorage() {} +type s3SelectelConfig struct { + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region" example:"ru-3"` // Region to connect to. + Endpoint string `json:"endpoint" example:"s3.ru-1.storage.selcloud.ru"` // Endpoint for S3 API. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. 
+ UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
+ DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. 
+ SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. +} + +type createS3SelectelStorageRequest struct { + Name string `json:"name" example:"my-storage"` // Name of the storage, must be unique + Path string `json:"path"` // Path of the storage + Config s3SelectelConfig `json:"config"` // config for the storage + ClientConfig model.ClientConfig `json:"clientConfig"` // config for underlying HTTP client +} + +// @ID CreateS3SelectelStorage +// @Summary Create S3 storage with Selectel - Selectel Object Storage +// @Tags Storage +// @Accept json +// @Produce json +// @Success 200 {object} model.Storage +// @Failure 400 {object} api.HTTPError +// @Failure 500 {object} api.HTTPError +// @Param request body createS3SelectelStorageRequest true "Request body" +// @Router /storage/s3/selectel [post] +func createS3SelectelStorage() {} + +type s3ServercoreConfig struct { + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region" example:"uz-2"` // Region to connect to. + Endpoint string `json:"endpoint" example:"s3.ru-1.storage.selcloud.ru"` // Endpoint for S3 API. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. 
+ DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. 
+ Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. 
+ NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. +} + +type createS3ServercoreStorageRequest struct { + Name string `json:"name" example:"my-storage"` // Name of the storage, must be unique + Path string `json:"path"` // Path of the storage + Config s3ServercoreConfig `json:"config"` // config for the storage + ClientConfig model.ClientConfig `json:"clientConfig"` // config for underlying HTTP client +} + +// @ID CreateS3ServercoreStorage +// @Summary Create S3 storage with Servercore - Servercore Object Storage +// @Tags Storage +// @Accept json +// @Produce json +// @Success 200 {object} model.Storage +// @Failure 400 {object} api.HTTPError +// @Failure 500 {object} api.HTTPError +// @Param request body createS3ServercoreStorageRequest true "Request body" +// @Router /storage/s3/servercore [post] +func createS3ServercoreStorage() {} + +type s3SpectraLogicConfig struct { + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Endpoint string `json:"endpoint"` // Endpoint for S3 API. 
+ UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. 
+ ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. 
+ VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. 
+} + +type createS3SpectraLogicStorageRequest struct { + Name string `json:"name" example:"my-storage"` // Name of the storage, must be unique + Path string `json:"path"` // Path of the storage + Config s3SpectraLogicConfig `json:"config"` // config for the storage + ClientConfig model.ClientConfig `json:"clientConfig"` // config for underlying HTTP client +} + +// @ID CreateS3SpectraLogicStorage +// @Summary Create S3 storage with SpectraLogic - Spectra Logic Black Pearl +// @Tags Storage +// @Accept json +// @Produce json +// @Success 200 {object} model.Storage +// @Failure 400 {object} api.HTTPError +// @Failure 500 {object} api.HTTPError +// @Param request body createS3SpectraLogicStorageRequest true "Request body" +// @Router /storage/s3/spectralogic [post] +func createS3SpectraLogicStorage() {} + type s3StackPathConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. - SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). - Region string `json:"region" example:""` // Region to connect to. - Endpoint string `json:"endpoint" example:"s3.us-east-2.stackpathstorage.com"` // Endpoint for StackPath Object Storage. - Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. - UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. - ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. - MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. - CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. 
- DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. - SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. - ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. - V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. - UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). - ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. - ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. 
- DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. - DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. - VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. - VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. - Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. - UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. - NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. - UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. - SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK - Description string `json:"description"` // Description of the remote. + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). 
+ AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region"` // Region to connect to. + Endpoint string `json:"endpoint" example:"s3.us-east-2.stackpathstorage.com"` // Endpoint for S3 API. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. 
+ UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
+ DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. 
+ SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3StackPathStorageRequest struct { @@ -3195,49 +4499,56 @@ type createS3StackPathStorageRequest struct { func createS3StackPathStorage() {} type s3StorjConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. - SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). - Endpoint string `json:"endpoint" example:"gateway.storjshare.io"` // Endpoint for Storj Gateway. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. - UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. - ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. - MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. - CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. - DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. - SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. - ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. - V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. 
- UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). - ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. - ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. - DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
- DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. - VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. - VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. - Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. - UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. - NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. - UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. - SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK - Description string `json:"description"` // Description of the remote. + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. 
+ SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Endpoint string `json:"endpoint" example:"gateway.storjshare.io"` // Endpoint for S3 API. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). 
+ ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. 
+ VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3StorjStorageRequest struct { @@ -3260,51 +4571,58 @@ type createS3StorjStorageRequest struct { func createS3StorjStorage() {} type s3SynologyConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. - SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). - Region string `json:"region" example:"eu-001"` // Region where your data stored. 
- Endpoint string `json:"endpoint" example:"eu-001.s3.synologyc2.net"` // Endpoint for Synology C2 Object Storage API. - LocationConstraint string `json:"locationConstraint"` // Location constraint - must be set to match the Region. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. - UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. - ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. - MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. - CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. - DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. - SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. - ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. - V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. - UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). - ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. 
- ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. - DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. - DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. - VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. - VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. 
- Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. - UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. - NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. - UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. - SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK - Description string `json:"description"` // Description of the remote. + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region" example:"eu-001"` // Region to connect to. + Endpoint string `json:"endpoint" example:"eu-001.s3.synologyc2.net"` // Endpoint for S3 API. + LocationConstraint string `json:"locationConstraint"` // Location constraint - must be set to match the Region. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. 
+ SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. 
+ MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. 
+ NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3SynologyStorageRequest struct { @@ -3327,51 +4645,59 @@ type createS3SynologyStorageRequest struct { func createS3SynologyStorage() {} type s3TencentCOSConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. - SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). - Endpoint string `json:"endpoint" example:"cos.ap-beijing.myqcloud.com"` // Endpoint for Tencent COS API. - Acl string `json:"acl" example:"default"` // Canned ACL used when creating buckets and storing or copying objects. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. - StorageClass string `json:"storageClass" example:""` // The storage class to use when storing new objects in Tencent COS. - UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. - ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. 
- MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. - CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. - DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. - SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. - ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. - V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. - UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). - ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. - ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. 
(no longer used) - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. - DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. - DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. - VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. - VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. - Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. - UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. - NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. - UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. 
- SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK - Description string `json:"description"` // Description of the remote. + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Endpoint string `json:"endpoint" example:"cos.ap-beijing.myqcloud.com"` // Endpoint for S3 API. + Acl string `json:"acl" example:"default"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + StorageClass string `json:"storageClass" example:"ARCHIVE"` // The storage class to use when storing new objects in S3. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. 
+ UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
+ DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. 
+ SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3TencentCOSStorageRequest struct { @@ -3394,52 +4720,60 @@ type createS3TencentCOSStorageRequest struct { func createS3TencentCOSStorage() {} type s3WasabiConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. - SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). - Region string `json:"region" example:""` // Region to connect to. - Endpoint string `json:"endpoint" example:"s3.wasabisys.com"` // Endpoint for S3 API. - LocationConstraint string `json:"locationConstraint"` // Location constraint - must be set to match the Region. - Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. - UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. - ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. - MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. - CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. - DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. - SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. 
- ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. - V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. - UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). - ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. - ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. - DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
- DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. - VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. - VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. - Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. - MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. - UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. - NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. - UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. - SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK - Description string `json:"description"` // Description of the remote. + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. 
+ SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region"` // Region to connect to. + Endpoint string `json:"endpoint" example:"s3.wasabisys.com"` // Endpoint for S3 API. + LocationConstraint string `json:"locationConstraint"` // Location constraint - must be set to match the Region. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. + RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. 
+ UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
+ DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. + SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. 
+ SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3WasabiStorageRequest struct { @@ -3461,17 +4795,93 @@ type createS3WasabiStorageRequest struct { // @Router /storage/s3/wasabi [post] func createS3WasabiStorage() {} +type s3ZataConfig struct { + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region" example:"us-east-1"` // Region to connect to. + Endpoint string `json:"endpoint" example:"idr01.zata.ai"` // Endpoint for S3 API. + LocationConstraint string `json:"locationConstraint"` // Location constraint - must be set to match the Region. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + RoleArn string `json:"roleArn"` // ARN of the IAM role to assume. 
+ RoleSessionName string `json:"roleSessionName"` // Session name for assumed role. + RoleSessionDuration string `json:"roleSessionDuration"` // Session duration for assumed role. + RoleExternalId string `json:"roleExternalId"` // External ID for assumed role. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseArnRegion bool `json:"useArnRegion" default:"false"` // If true, enables arn region support for the service. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. 
+ DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + UseDataIntegrityProtections string `json:"useDataIntegrityProtections" default:"unset"` // If true use AWS S3 data integrity protections. + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + UseXId string `json:"useXId" default:"unset"` // Set if rclone should add x-id URL parameters. 
+ SignAcceptEncoding string `json:"signAcceptEncoding" default:"unset"` // Set if rclone should include Accept-Encoding as part of the signature. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. +} + +type createS3ZataStorageRequest struct { + Name string `json:"name" example:"my-storage"` // Name of the storage, must be unique + Path string `json:"path"` // Path of the storage + Config s3ZataConfig `json:"config"` // config for the storage + ClientConfig model.ClientConfig `json:"clientConfig"` // config for underlying HTTP client +} + +// @ID CreateS3ZataStorage +// @Summary Create S3 storage with Zata - Zata (S3 compatible Gateway) +// @Tags Storage +// @Accept json +// @Produce json +// @Success 200 {object} model.Storage +// @Failure 400 {object} api.HTTPError +// @Failure 500 {object} api.HTTPError +// @Param request body createS3ZataStorageRequest true "Request body" +// @Router /storage/s3/zata [post] +func createS3ZataStorage() {} + type seafileConfig struct { - Url string `json:"url" example:"https://cloud.seafile.com/"` // URL of seafile host to connect to. - User string `json:"user"` // User name (usually email address). - Pass string `json:"pass"` // Password. - TwoFA bool `json:"2fa" default:"false"` // Two-factor authentication ('true' if the account has 2FA enabled). - Library string `json:"library"` // Name of the library. - LibraryKey string `json:"libraryKey"` // Library password (for encrypted libraries only). - CreateLibrary bool `json:"createLibrary" default:"false"` // Should rclone create a library if it doesn't exist. - AuthToken string `json:"authToken"` // Authentication token. - Encoding string `json:"encoding" default:"Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8"` // The encoding for the backend. - Description string `json:"description"` // Description of the remote. 
+ Url string `json:"url" example:"https://cloud.seafile.com/"` // URL of seafile host to connect to. + User string `json:"user"` // User name (usually email address). + Pass string `json:"pass"` // Password. + TwoFA bool `json:"2fa" default:"false"` // Two-factor authentication ('true' if the account has 2FA enabled). + Library string `json:"library"` // Name of the library. + LibraryKey string `json:"libraryKey"` // Library password (for encrypted libraries only). + CreateLibrary bool `json:"createLibrary" default:"false"` // Should rclone create a library if it doesn't exist. + AuthToken string `json:"authToken"` // Authentication token. + Encoding string `json:"encoding" default:"Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8,Dot"` // The encoding for the backend. + Description string `json:"description"` // Description of the remote. } type createSeafileStorageRequest struct { @@ -3501,6 +4911,7 @@ type sftpConfig struct { KeyPem string `json:"keyPem"` // Raw PEM-encoded private key. KeyFile string `json:"keyFile"` // Path to PEM-encoded private key file. KeyFilePass string `json:"keyFilePass"` // The passphrase to decrypt the PEM-encoded private key file. + Pubkey string `json:"pubkey"` // SSH public certificate for public certificate based authentication. PubkeyFile string `json:"pubkeyFile"` // Optional path to public key file. KnownHostsFile string `json:"knownHostsFile" example:"~/.ssh/known_hosts"` // Optional path to known_hosts file. KeyUseAgent bool `json:"keyUseAgent" default:"false"` // When set forces the usage of the ssh-agent. @@ -3510,8 +4921,14 @@ type sftpConfig struct { PathOverride string `json:"pathOverride"` // Override path used by SSH shell commands. SetModtime bool `json:"setModtime" default:"true"` // Set the modified time on the remote if set. ShellType string `json:"shellType" example:"none"` // The type of SSH shell on remote server, if any. - Md5sumCommand string `json:"md5sumCommand"` // The command used to read md5 hashes. 
- Sha1sumCommand string `json:"sha1sumCommand"` // The command used to read sha1 hashes. + Hashes string `json:"hashes"` // Comma separated list of supported checksum types. + Md5sumCommand string `json:"md5sumCommand"` // The command used to read MD5 hashes. + Sha1sumCommand string `json:"sha1sumCommand"` // The command used to read SHA-1 hashes. + Crc32sumCommand string `json:"crc32sumCommand"` // The command used to read CRC-32 hashes. + Sha256sumCommand string `json:"sha256sumCommand"` // The command used to read SHA-256 hashes. + Blake3sumCommand string `json:"blake3sumCommand"` // The command used to read BLAKE3 hashes. + Xxh3sumCommand string `json:"xxh3sumCommand"` // The command used to read XXH3 hashes. + Xxh128sumCommand string `json:"xxh128sumCommand"` // The command used to read XXH128 hashes. SkipLinks bool `json:"skipLinks" default:"false"` // Set to skip any symlinks and any other non regular files. Subsystem string `json:"subsystem" default:"sftp"` // Specifies the SSH2 subsystem on the remote host. ServerCommand string `json:"serverCommand"` // Specifies the path or command to run a sftp server on the remote host. @@ -3529,6 +4946,7 @@ type sftpConfig struct { HostKeyAlgorithms string `json:"hostKeyAlgorithms"` // Space separated list of host key algorithms, ordered by preference. Ssh string `json:"ssh"` // Path and arguments to external ssh binary. SocksProxy string `json:"socksProxy"` // Socks 5 proxy host. + HttpProxy string `json:"httpProxy"` // URL for HTTP CONNECT proxy CopyIsHardlink bool `json:"copyIsHardlink" default:"false"` // Set to enable server side copies using hardlinks. Description string `json:"description"` // Description of the remote. } @@ -3553,17 +4971,18 @@ type createSftpStorageRequest struct { func createSftpStorage() {} type sharefileConfig struct { - ClientId string `json:"clientId"` // OAuth Client Id. - ClientSecret string `json:"clientSecret"` // OAuth Client Secret. 
- Token string `json:"token"` // OAuth Access Token as a JSON blob. - AuthUrl string `json:"authUrl"` // Auth server URL. - TokenUrl string `json:"tokenUrl"` // Token server url. - UploadCutoff string `json:"uploadCutoff" default:"128Mi"` // Cutoff for switching to multipart upload. - RootFolderId string `json:"rootFolderId" example:""` // ID of the root folder. - ChunkSize string `json:"chunkSize" default:"64Mi"` // Upload chunk size. - Endpoint string `json:"endpoint"` // Endpoint for API calls. - Encoding string `json:"encoding" default:"Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot"` // The encoding for the backend. - Description string `json:"description"` // Description of the remote. + ClientId string `json:"clientId"` // OAuth Client Id. + ClientSecret string `json:"clientSecret"` // OAuth Client Secret. + Token string `json:"token"` // OAuth Access Token as a JSON blob. + AuthUrl string `json:"authUrl"` // Auth server URL. + TokenUrl string `json:"tokenUrl"` // Token server url. + ClientCredentials bool `json:"clientCredentials" default:"false"` // Use client credentials OAuth flow. + UploadCutoff string `json:"uploadCutoff" default:"128Mi"` // Cutoff for switching to multipart upload. + RootFolderId string `json:"rootFolderId" example:""` // ID of the root folder. + ChunkSize string `json:"chunkSize" default:"64Mi"` // Upload chunk size. + Endpoint string `json:"endpoint"` // Endpoint for API calls. + Encoding string `json:"encoding" default:"Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot"` // The encoding for the backend. + Description string `json:"description"` // Description of the remote. } type createSharefileStorageRequest struct { @@ -3619,9 +5038,11 @@ type smbConfig struct { Pass string `json:"pass"` // SMB password. 
Domain string `json:"domain" default:"WORKGROUP"` // Domain name for NTLM authentication. Spn string `json:"spn"` // Service principal name. + UseKerberos bool `json:"useKerberos" default:"false"` // Use Kerberos authentication. IdleTimeout string `json:"idleTimeout" default:"1m0s"` // Max time before closing idle connections. HideSpecialShare bool `json:"hideSpecialShare" default:"true"` // Hide special shares (e.g. print$) which users aren't supposed to access. CaseInsensitive bool `json:"caseInsensitive" default:"true"` // Whether the server is configured to be case-insensitive. + KerberosCcache string `json:"kerberosCcache"` // Path to the Kerberos credential cache (krb5cc). Encoding string `json:"encoding" default:"Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot"` // The encoding for the backend. Description string `json:"description"` // Description of the remote. } @@ -3807,32 +5228,6 @@ type createUnionStorageRequest struct { // @Router /storage/union [post] func createUnionStorage() {} -type uptoboxConfig struct { - AccessToken string `json:"accessToken"` // Your access token. - Private bool `json:"private" default:"false"` // Set to make uploaded files private - Encoding string `json:"encoding" default:"Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot"` // The encoding for the backend. - Description string `json:"description"` // Description of the remote. 
-} - -type createUptoboxStorageRequest struct { - Name string `json:"name" example:"my-storage"` // Name of the storage, must be unique - Path string `json:"path"` // Path of the storage - Config uptoboxConfig `json:"config"` // config for the storage - ClientConfig model.ClientConfig `json:"clientConfig"` // config for underlying HTTP client -} - -// @ID CreateUptoboxStorage -// @Summary Create Uptobox storage -// @Tags Storage -// @Accept json -// @Produce json -// @Success 200 {object} model.Storage -// @Failure 400 {object} api.HTTPError -// @Failure 500 {object} api.HTTPError -// @Param request body createUptoboxStorageRequest true "Request body" -// @Router /storage/uptobox [post] -func createUptoboxStorage() {} - type webdavConfig struct { Url string `json:"url"` // URL of http host to connect to. Vendor string `json:"vendor" example:"fastmail"` // Name of the WebDAV site/service/software you are using. @@ -3847,6 +5242,7 @@ type webdavConfig struct { OwncloudExcludeShares bool `json:"owncloudExcludeShares" default:"false"` // Exclude ownCloud shares OwncloudExcludeMounts bool `json:"owncloudExcludeMounts" default:"false"` // Exclude ownCloud mounted storages UnixSocket string `json:"unixSocket"` // Path to a unix domain socket to dial to, instead of opening a TCP connection directly + AuthRedirect bool `json:"authRedirect" default:"false"` // Preserve authentication on redirect. Description string `json:"description"` // Description of the remote. } @@ -3870,15 +5266,16 @@ type createWebdavStorageRequest struct { func createWebdavStorage() {} type yandexConfig struct { - ClientId string `json:"clientId"` // OAuth Client Id. - ClientSecret string `json:"clientSecret"` // OAuth Client Secret. - Token string `json:"token"` // OAuth Access Token as a JSON blob. - AuthUrl string `json:"authUrl"` // Auth server URL. - TokenUrl string `json:"tokenUrl"` // Token server url. 
- HardDelete bool `json:"hardDelete" default:"false"` // Delete files permanently rather than putting them into the trash. - Encoding string `json:"encoding" default:"Slash,Del,Ctl,InvalidUtf8,Dot"` // The encoding for the backend. - SpoofUa bool `json:"spoofUa" default:"true"` // Set the user agent to match an official version of the yandex disk client. May help with upload performance. - Description string `json:"description"` // Description of the remote. + ClientId string `json:"clientId"` // OAuth Client Id. + ClientSecret string `json:"clientSecret"` // OAuth Client Secret. + Token string `json:"token"` // OAuth Access Token as a JSON blob. + AuthUrl string `json:"authUrl"` // Auth server URL. + TokenUrl string `json:"tokenUrl"` // Token server url. + ClientCredentials bool `json:"clientCredentials" default:"false"` // Use client credentials OAuth flow. + HardDelete bool `json:"hardDelete" default:"false"` // Delete files permanently rather than putting them into the trash. + Encoding string `json:"encoding" default:"Slash,Del,Ctl,InvalidUtf8,Dot"` // The encoding for the backend. + SpoofUa bool `json:"spoofUa" default:"true"` // Set the user agent to match an official version of the yandex disk client. May help with upload performance. + Description string `json:"description"` // Description of the remote. } type createYandexStorageRequest struct { @@ -3901,14 +5298,16 @@ type createYandexStorageRequest struct { func createYandexStorage() {} type zohoConfig struct { - ClientId string `json:"clientId"` // OAuth Client Id. - ClientSecret string `json:"clientSecret"` // OAuth Client Secret. - Token string `json:"token"` // OAuth Access Token as a JSON blob. - AuthUrl string `json:"authUrl"` // Auth server URL. - TokenUrl string `json:"tokenUrl"` // Token server url. - Region string `json:"region" example:"com"` // Zoho region to connect to. - Encoding string `json:"encoding" default:"Del,Ctl,InvalidUtf8"` // The encoding for the backend. 
- Description string `json:"description"` // Description of the remote. + ClientId string `json:"clientId"` // OAuth Client Id. + ClientSecret string `json:"clientSecret"` // OAuth Client Secret. + Token string `json:"token"` // OAuth Access Token as a JSON blob. + AuthUrl string `json:"authUrl"` // Auth server URL. + TokenUrl string `json:"tokenUrl"` // Token server url. + ClientCredentials bool `json:"clientCredentials" default:"false"` // Use client credentials OAuth flow. + Region string `json:"region" example:"com"` // Zoho region to connect to. + UploadCutoff string `json:"uploadCutoff" default:"10Mi"` // Cutoff for switching to large file upload api (>= 10 MiB). + Encoding string `json:"encoding" default:"Del,Ctl,InvalidUtf8"` // The encoding for the backend. + Description string `json:"description"` // Description of the remote. } type createZohoStorageRequest struct { diff --git a/service/contentprovider/bitswap.go b/service/contentprovider/bitswap.go deleted file mode 100644 index 87ba0b6e6..000000000 --- a/service/contentprovider/bitswap.go +++ /dev/null @@ -1,69 +0,0 @@ -package contentprovider - -import ( - "context" - - "github.com/cockroachdb/errors" - "github.com/data-preservation-programs/singularity/store" - "github.com/data-preservation-programs/singularity/util" - bsnetwork "github.com/ipfs/boxo/bitswap/network/bsnet" - "github.com/ipfs/boxo/bitswap/server" - "github.com/libp2p/go-libp2p" - "github.com/libp2p/go-libp2p/core/crypto" - "github.com/libp2p/go-libp2p/core/host" - "github.com/multiformats/go-multiaddr" - "gorm.io/gorm" -) - -// BitswapServer represents a server instance for handling Bitswap protocol interactions. -// Bitswap is a peer-to-peer data trading protocol in which peers request the data they need, -// and respond to other peers' requests based on certain policies. -type BitswapServer struct { - // dbNoContext is a GORM database instance that doesn't use context for managing database connections. 
- dbNoContext *gorm.DB - - // host is a libp2p host used to build and configure a new Bitswap instance. - host host.Host -} - -func NewBitswapServer(dbNoContext *gorm.DB, private crypto.PrivKey, addrs ...multiaddr.Multiaddr) (*BitswapServer, error) { - h, err := util.InitHost([]libp2p.Option{libp2p.Identity(private)}, addrs...) - if err != nil { - return nil, errors.WithStack(err) - } - for _, m := range h.Addrs() { - logger.Info("libp2p listening on " + m.String()) - } - logger.Info("peerID: " + h.ID().String()) - return &BitswapServer{ - dbNoContext: dbNoContext, - host: h, - }, nil -} - -func (BitswapServer) Name() string { - return "Bitswap" -} - -// Start initializes the Bitswap server with the provided context. -// It sets up the necessary routing and networking components, -// and starts serving Bitswap requests. -// It returns channels that signal when the service has stopped or encountered an error. -func (s BitswapServer) Start(ctx context.Context, exitErr chan<- error) error { - // boxo v0.35.0: routing parameter removed from NewFromIpfsHost - net := bsnetwork.NewFromIpfsHost(s.host) - bs := &store.FileReferenceBlockStore{DBNoContext: s.dbNoContext} - bsserver := server.New(ctx, net, bs) - net.Start(bsserver) - - go func() { - <-ctx.Done() - net.Stop() - bsserver.Close() - s.host.Close() - if exitErr != nil { - exitErr <- nil - } - }() - return nil -} diff --git a/service/contentprovider/bitswap_test.go b/service/contentprovider/bitswap_test.go deleted file mode 100644 index d92368e6d..000000000 --- a/service/contentprovider/bitswap_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package contentprovider - -import ( - "context" - "testing" - "time" - - "github.com/data-preservation-programs/singularity/util" - "github.com/data-preservation-programs/singularity/util/testutil" - "github.com/stretchr/testify/require" - "gorm.io/gorm" -) - -func TestBitswapServer(t *testing.T) { - testutil.All(t, func(ctx context.Context, t *testing.T, db *gorm.DB) { - h, err := 
util.InitHost(nil) - require.NoError(t, err) - defer h.Close() - s := BitswapServer{ - dbNoContext: db, - host: h, - } - require.Equal(t, "Bitswap", s.Name()) - - exitErr := make(chan error, 1) - ctx, cancel := context.WithCancel(ctx) - err = s.Start(ctx, exitErr) - require.NoError(t, err) - time.Sleep(200 * time.Millisecond) - cancel() - select { - case <-time.After(1 * time.Second): - t.Fatal("bitswap server did not stop") - case err = <-exitErr: - require.NoError(t, err) - } - }) -} diff --git a/service/contentprovider/contentprovider.go b/service/contentprovider/contentprovider.go index 2913c2a2d..199397e27 100644 --- a/service/contentprovider/contentprovider.go +++ b/service/contentprovider/contentprovider.go @@ -2,14 +2,9 @@ package contentprovider import ( "context" - "encoding/base64" - "github.com/cockroachdb/errors" "github.com/data-preservation-programs/singularity/service" - "github.com/data-preservation-programs/singularity/util" logging "github.com/ipfs/go-log/v2" - "github.com/libp2p/go-libp2p/core/crypto" - "github.com/multiformats/go-multiaddr" "gorm.io/gorm" ) @@ -20,95 +15,29 @@ type Service struct { } type Config struct { - HTTP HTTPConfig - Bitswap BitswapConfig + HTTP HTTPConfig } type HTTPConfig struct { EnablePiece bool EnablePieceMetadata bool + EnableIPFS bool Bind string } -type BitswapConfig struct { - Enable bool - IdentityKey string - ListenMultiAddrs []string -} - -// NewService creates a new Service instance with the provided database and configuration. -// -// The NewService function takes the following parameters: -// - db: The gorm.DB instance for database operations. -// - config: The Config struct containing the service configuration. -// -// The function performs the following steps: -// -// 1. Creates an empty Service instance. -// -// 2. If the HTTP server is enabled in the configuration, creates an HTTPServer instance and adds it to the servers slice. 
-// - The HTTPServer is configured with the bind address, database without context, and a DefaultHandlerResolver. -// -// 3. If the Bitswap server is enabled in the configuration, initializes the identity key based on the configuration. -// - If the identity key is not provided, generates a new peer identity key. -// - If the identity key is provided, decodes it from base64. -// - Unmarshals the private key from the identity key bytes. -// - If no listen multiaddresses are provided, sets a default listen multiaddress. -// - Converts each listen multiaddress string to a Multiaddr instance. -// - Initializes a libp2p host with the identity key and listen multiaddresses. -// - Logs the libp2p listening addresses and peer ID. -// - Creates a BitswapServer instance with the libp2p host and database without context, and adds it to the servers slice. -// -// 4. Returns the created Service instance and nil for the error if all steps are executed successfully. func NewService(db *gorm.DB, config Config) (*Service, error) { s := &Service{} - if config.HTTP.EnablePiece || config.HTTP.EnablePieceMetadata { + if config.HTTP.EnablePiece || config.HTTP.EnablePieceMetadata || config.HTTP.EnableIPFS { s.servers = append(s.servers, &HTTPServer{ dbNoContext: db, bind: config.HTTP.Bind, enablePiece: config.HTTP.EnablePiece, enablePieceMetadata: config.HTTP.EnablePieceMetadata, + enableIPFS: config.HTTP.EnableIPFS, }) } - if config.Bitswap.Enable { - var private []byte - if config.Bitswap.IdentityKey == "" { - var err error - private, _, _, err = util.GenerateNewPeer() - if err != nil { - return nil, errors.WithStack(err) - } - } else { - var err error - private, err = base64.StdEncoding.DecodeString(config.Bitswap.IdentityKey) - if err != nil { - return nil, errors.WithStack(err) - } - } - identityKey, err := crypto.UnmarshalPrivateKey(private) - if err != nil { - return nil, errors.WithStack(err) - } - if len(config.Bitswap.ListenMultiAddrs) == 0 { - config.Bitswap.ListenMultiAddrs = 
[]string{"/ip4/0.0.0.0/tcp/0"} - } - var listenAddrs []multiaddr.Multiaddr - for _, addr := range config.Bitswap.ListenMultiAddrs { - ma, err := multiaddr.NewMultiaddr(addr) - if err != nil { - return nil, errors.WithStack(err) - } - listenAddrs = append(listenAddrs, ma) - } - - bitswapServer, err := NewBitswapServer(db, identityKey, listenAddrs...) - if err != nil { - return nil, errors.WithStack(err) - } - s.servers = append(s.servers, bitswapServer) - } return s, nil } diff --git a/service/contentprovider/contentprovider_test.go b/service/contentprovider/contentprovider_test.go index d795cec2d..17acb4638 100644 --- a/service/contentprovider/contentprovider_test.go +++ b/service/contentprovider/contentprovider_test.go @@ -2,12 +2,10 @@ package contentprovider import ( "context" - "encoding/base64" "testing" "time" "github.com/data-preservation-programs/singularity/service" - "github.com/data-preservation-programs/singularity/util" "github.com/data-preservation-programs/singularity/util/testutil" "github.com/stretchr/testify/require" "gorm.io/gorm" @@ -19,38 +17,9 @@ func TestContentProviderStart(t *testing.T) { HTTP: HTTPConfig{ EnablePiece: true, EnablePieceMetadata: true, + EnableIPFS: true, Bind: ":0", }, - Bitswap: BitswapConfig{ - Enable: true, - IdentityKey: "", - ListenMultiAddrs: nil, - }, - }) - require.NoError(t, err) - ctx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - err = service.Start(ctx) - require.ErrorIs(t, err, context.DeadlineExceeded) - }) -} - -func TestContentProviderStart_WithIdentityKey(t *testing.T) { - testutil.All(t, func(ctx context.Context, t *testing.T, db *gorm.DB) { - private, _, _, err := util.GenerateNewPeer() - require.NoError(t, err) - encoded := base64.StdEncoding.EncodeToString(private) - service, err := NewService(db, Config{ - HTTP: HTTPConfig{ - EnablePiece: true, - EnablePieceMetadata: true, - Bind: ":0", - }, - Bitswap: BitswapConfig{ - Enable: true, - IdentityKey: encoded, - ListenMultiAddrs: nil, 
- }, }) require.NoError(t, err) ctx, cancel := context.WithTimeout(ctx, time.Second) @@ -66,9 +35,7 @@ func TestContentProviderStart_NoneEnabled(t *testing.T) { HTTP: HTTPConfig{ EnablePiece: false, EnablePieceMetadata: false, - }, - Bitswap: BitswapConfig{ - Enable: false, + EnableIPFS: false, }, }) require.NoError(t, err) diff --git a/service/contentprovider/http.go b/service/contentprovider/http.go index 43c190b76..6d173bffe 100644 --- a/service/contentprovider/http.go +++ b/service/contentprovider/http.go @@ -15,6 +15,10 @@ import ( "github.com/data-preservation-programs/singularity/store" "github.com/data-preservation-programs/singularity/util" "github.com/fxamacker/cbor/v2" + "github.com/ipfs/boxo/blockservice" + "github.com/ipfs/boxo/exchange/offline" + "github.com/ipfs/boxo/gateway" + blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" "github.com/labstack/echo/v4" "github.com/labstack/echo/v4/middleware" @@ -26,6 +30,7 @@ type HTTPServer struct { bind string enablePiece bool enablePieceMetadata bool + enableIPFS bool } func (*HTTPServer) Name() string { @@ -100,6 +105,21 @@ func (s *HTTPServer) Start(ctx context.Context, exitErr chan<- error) error { e.GET("/piece/:id", s.handleGetPiece) e.HEAD("/piece/:id", s.handleGetPiece) } + if s.enableIPFS { + bs := &store.StorageBlockStore{DBNoContext: s.dbNoContext} + wrapped := &errorMappingBlockStore{inner: bs} + exch := offline.Exchange(wrapped) + bsvc := blockservice.New(wrapped, exch) + backend, err := gateway.NewBlocksBackend(bsvc) + if err != nil { + return err + } + gwHandler := gateway.NewHandler(gateway.Config{ + DeserializedResponses: false, + NoDNSLink: true, + }, backend) + e.Any("/ipfs/*", echo.WrapHandler(gwHandler)) + } e.GET("/health", func(c echo.Context) error { return c.String(http.StatusOK, "ok") }) @@ -387,3 +407,45 @@ func (s *HTTPServer) handleGetPiece(c echo.Context) error { return nil } + +// errorMappingBlockStore wraps a blockstore and maps specific errors to +// HTTP 
status codes via boxo/gateway's error mechanism. +type errorMappingBlockStore struct { + inner *store.StorageBlockStore +} + +func (e *errorMappingBlockStore) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) { + blk, err := e.inner.Get(ctx, c) + if err != nil && errors.Is(err, store.ErrFileHasChanged) { + return nil, gateway.NewErrorStatusCode(err, http.StatusConflict) + } + return blk, err +} + +func (e *errorMappingBlockStore) Has(ctx context.Context, c cid.Cid) (bool, error) { + return e.inner.Has(ctx, c) +} + +func (e *errorMappingBlockStore) GetSize(ctx context.Context, c cid.Cid) (int, error) { + return e.inner.GetSize(ctx, c) +} + +func (e *errorMappingBlockStore) Put(ctx context.Context, block blocks.Block) error { + return e.inner.Put(ctx, block) +} + +func (e *errorMappingBlockStore) PutMany(ctx context.Context, blks []blocks.Block) error { + return e.inner.PutMany(ctx, blks) +} + +func (e *errorMappingBlockStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + return e.inner.AllKeysChan(ctx) +} + +func (e *errorMappingBlockStore) HashOnRead(enabled bool) { + e.inner.HashOnRead(enabled) +} + +func (e *errorMappingBlockStore) DeleteBlock(ctx context.Context, c cid.Cid) error { + return e.inner.DeleteBlock(ctx, c) +} diff --git a/service/contentprovider/http_test.go b/service/contentprovider/http_test.go index b780ca7c2..2a0d25557 100644 --- a/service/contentprovider/http_test.go +++ b/service/contentprovider/http_test.go @@ -11,8 +11,12 @@ import ( "time" "github.com/data-preservation-programs/singularity/model" + "github.com/data-preservation-programs/singularity/store" "github.com/data-preservation-programs/singularity/util/testutil" "github.com/gotidy/ptr" + "github.com/ipfs/boxo/blockservice" + "github.com/ipfs/boxo/exchange/offline" + "github.com/ipfs/boxo/gateway" "github.com/ipfs/boxo/util" "github.com/ipfs/go-cid" "github.com/labstack/echo/v4" @@ -230,3 +234,154 @@ func TestHTTPServerHandler(t *testing.T) { t.Run("car file 
exists", testfunc) }) } + +func makeGatewayHandler(db *gorm.DB) http.Handler { + bs := &store.StorageBlockStore{DBNoContext: db} + wrapped := &errorMappingBlockStore{inner: bs} + exch := offline.Exchange(wrapped) + bsvc := blockservice.New(wrapped, exch) + backend, _ := gateway.NewBlocksBackend(bsvc) + return gateway.NewHandler(gateway.Config{ + DeserializedResponses: false, + NoDNSLink: true, + }, backend) +} + +func TestIPFSGateway(t *testing.T) { + testutil.All(t, func(ctx context.Context, t *testing.T, db *gorm.DB) { + rootCid := testutil.TestCid + err := db.Create(&model.Car{ + PieceCID: model.CID(cid.NewCidV1(cid.FilCommitmentUnsealed, util.Hash([]byte("ipfs_test")))), + PieceSize: 128, + FileSize: 59 + 1 + 36 + 5, + PreparationID: ptr.Of(model.PreparationID(1)), + PieceType: model.DataPiece, + Attachment: &model.SourceAttachment{ + Preparation: &model.Preparation{}, + Storage: &model.Storage{ + Type: "local", + }, + }, + RootCID: model.CID(rootCid), + }).Error + require.NoError(t, err) + err = db.Create(&model.CarBlock{ + CarID: ptr.Of(model.CarID(1)), + CID: model.CID(rootCid), + CarOffset: 59, + CarBlockLength: 1 + 36 + 5, + Varint: varint.ToUvarint(36 + 5), + RawBlock: []byte("hello"), + }).Error + require.NoError(t, err) + + gw := makeGatewayHandler(db) + + t.Run("success", func(t *testing.T) { + req := httptest.NewRequest(http.MethodGet, "/ipfs/"+rootCid.String()+"?format=car", nil) + rec := httptest.NewRecorder() + gw.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + require.Contains(t, rec.Header().Get("Content-Type"), "application/vnd.ipld.car") + require.Greater(t, rec.Body.Len(), 0) + }) + + t.Run("not found", func(t *testing.T) { + unknownCid := cid.NewCidV1(cid.Raw, util.Hash([]byte("unknown"))) + req := httptest.NewRequest(http.MethodGet, "/ipfs/"+unknownCid.String()+"?format=raw", nil) + rec := httptest.NewRecorder() + gw.ServeHTTP(rec, req) + // boxo gateway returns 404 for unknown blocks in raw format + require.Equal(t, 
http.StatusNotFound, rec.Code) + }) + + t.Run("raw format", func(t *testing.T) { + req := httptest.NewRequest(http.MethodGet, "/ipfs/"+rootCid.String()+"?format=raw", nil) + rec := httptest.NewRecorder() + gw.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + require.Contains(t, rec.Header().Get("Content-Type"), "application/vnd.ipld.raw") + require.Equal(t, []byte("hello"), rec.Body.Bytes()) + }) + + t.Run("accept header", func(t *testing.T) { + req := httptest.NewRequest(http.MethodGet, "/ipfs/"+rootCid.String(), nil) + req.Header.Set("Accept", "application/vnd.ipld.car") + rec := httptest.NewRecorder() + gw.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + }) + }) +} + +func TestIPFSGateway_FileChanged(t *testing.T) { + testutil.All(t, func(ctx context.Context, t *testing.T, db *gorm.DB) { + tmp := t.TempDir() + fileContent := []byte("12345678901234567890") + fileCid := cid.NewCidV1(cid.Raw, util.Hash(fileContent)) + err := os.WriteFile(filepath.Join(tmp, "data.txt"), fileContent, 0644) + require.NoError(t, err) + + changedRootCid := cid.NewCidV1(cid.Raw, util.Hash([]byte("changed_root"))) + + prep := &model.Preparation{} + require.NoError(t, db.Create(prep).Error) + + storage := &model.Storage{Type: "local", Path: tmp} + require.NoError(t, db.Create(storage).Error) + + attachment := &model.SourceAttachment{ + PreparationID: prep.ID, + StorageID: storage.ID, + } + require.NoError(t, db.Create(attachment).Error) + + dir := &model.Directory{AttachmentID: &attachment.ID} + require.NoError(t, db.Create(dir).Error) + + file := &model.File{ + Path: "data.txt", + Size: int64(len(fileContent)), + LastModifiedNano: testutil.GetFileTimestamp(t, filepath.Join(tmp, "data.txt")), + AttachmentID: &attachment.ID, + DirectoryID: &dir.ID, + } + require.NoError(t, db.Create(file).Error) + + blockLen := int32(len(varint.ToUvarint(uint64(fileCid.ByteLen())+uint64(len(fileContent)))) + fileCid.ByteLen() + len(fileContent)) + car := &model.Car{ + 
PieceCID: model.CID(cid.NewCidV1(cid.FilCommitmentUnsealed, util.Hash([]byte("changed_piece")))), + PieceSize: 256, + FileSize: 59 + int64(blockLen), + RootCID: model.CID(changedRootCid), + PreparationID: &prep.ID, + AttachmentID: &attachment.ID, + PieceType: model.DataPiece, + } + require.NoError(t, db.Create(car).Error) + + v := varint.ToUvarint(uint64(fileCid.ByteLen()) + uint64(len(fileContent))) + carBlock := &model.CarBlock{ + CarID: &car.ID, + CID: model.CID(fileCid), + CarOffset: 59, + CarBlockLength: blockLen, + Varint: v, + FileID: &file.ID, + FileOffset: 0, + } + require.NoError(t, db.Create(carBlock).Error) + + // modify source file + err = os.WriteFile(filepath.Join(tmp, "data.txt"), []byte("changed"), 0644) + require.NoError(t, err) + + gw := makeGatewayHandler(db) + // request the block CID directly -- the gateway will call Get() on the + // blockstore, which opens the source file and detects the change + req := httptest.NewRequest(http.MethodGet, "/ipfs/"+fileCid.String()+"?format=raw", nil) + rec := httptest.NewRecorder() + gw.ServeHTTP(rec, req) + require.Equal(t, http.StatusConflict, rec.Code) + }) +} diff --git a/storagesystem/rclone.go b/storagesystem/rclone.go index f0283ea19..e591ce694 100644 --- a/storagesystem/rclone.go +++ b/storagesystem/rclone.go @@ -314,13 +314,13 @@ func NewRCloneHandler(ctx context.Context, s model.Storage) (*RCloneHandler, err func overrideConfig(config *fs.ConfigInfo, s model.Storage) { config.UseServerModTime = true if s.ClientConfig.ConnectTimeout != nil { - config.ConnectTimeout = *s.ClientConfig.ConnectTimeout + config.ConnectTimeout = fs.Duration(*s.ClientConfig.ConnectTimeout) } if s.ClientConfig.Timeout != nil { - config.Timeout = *s.ClientConfig.Timeout + config.Timeout = fs.Duration(*s.ClientConfig.Timeout) } if s.ClientConfig.ExpectContinueTimeout != nil { - config.ExpectContinueTimeout = *s.ClientConfig.ExpectContinueTimeout + config.ExpectContinueTimeout = 
fs.Duration(*s.ClientConfig.ExpectContinueTimeout) } if s.ClientConfig.InsecureSkipVerify != nil { config.InsecureSkipVerify = true diff --git a/storagesystem/rclone_test.go b/storagesystem/rclone_test.go index 88969cfe8..388e819e1 100644 --- a/storagesystem/rclone_test.go +++ b/storagesystem/rclone_test.go @@ -165,6 +165,7 @@ func TestRCloneHandler_ReadS3Files(t *testing.T) { "force_path_style": "true", "region": "us-east-1", "chunk_size": "5Mi", + "copy_cutoff": "5Mi", "endpoint": fmt.Sprint("http://", localS3.Address(localstack.APIPort)), "env_auth": "false", }, diff --git a/storagesystem/types.go b/storagesystem/types.go index f2594b304..6ba7a4b3b 100644 --- a/storagesystem/types.go +++ b/storagesystem/types.go @@ -45,7 +45,6 @@ import ( _ "github.com/rclone/rclone/backend/sugarsync" _ "github.com/rclone/rclone/backend/swift" _ "github.com/rclone/rclone/backend/union" - _ "github.com/rclone/rclone/backend/uptobox" _ "github.com/rclone/rclone/backend/webdav" _ "github.com/rclone/rclone/backend/yandex" _ "github.com/rclone/rclone/backend/zoho" @@ -187,17 +186,25 @@ func (option *Option) ToCLIFlag(prefix string, useBuiltIn bool, category string) EnvVars: []string{strings.ToUpper(strings.ReplaceAll(name, "-", "_"))}, } default: - //nolint:forcetypeassert + var value string + switch v := option.Default.(type) { + case interface{ String() string }: + value = v.String() + case []string: + value = strings.Join(v, ",") + case string: + value = v + default: + value = fmt.Sprintf("%v", v) + } flag = &cli.StringFlag{ Category: category, Name: name, Required: required, Usage: usage, - Value: option.Default.(interface { - String() string - }).String(), - Aliases: aliases, - EnvVars: []string{strings.ToUpper(strings.ReplaceAll(name, "-", "_"))}, + Value: value, + Aliases: aliases, + EnvVars: []string{strings.ToUpper(strings.ReplaceAll(name, "-", "_"))}, } } return flag diff --git a/storagesystem/types_test.go b/storagesystem/types_test.go index 89feab8ad..01f078927 100644 --- 
a/storagesystem/types_test.go +++ b/storagesystem/types_test.go @@ -7,7 +7,7 @@ import ( ) func TestBackends(t *testing.T) { - require.EqualValues(t, 41, len(Backends)) // Was 42 before amazonclouddrive removal + require.EqualValues(t, 40, len(Backends)) local := BackendMap["local"] require.Equal(t, "local", local.Name) } diff --git a/store/storage_blockstore.go b/store/storage_blockstore.go new file mode 100644 index 000000000..4f89b38b1 --- /dev/null +++ b/store/storage_blockstore.go @@ -0,0 +1,187 @@ +package store + +import ( + "context" + "io" + "sync" + + "github.com/cockroachdb/errors" + "github.com/data-preservation-programs/singularity/model" + "github.com/data-preservation-programs/singularity/storagesystem" + "github.com/data-preservation-programs/singularity/util" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + format "github.com/ipfs/go-ipld-format" + "gorm.io/gorm" +) + +// StorageBlockStore is a blockstore backed by the singularity database and +// rclone storage backends. It pools rclone handlers per storage and holds +// open a streaming file reader to serve sequential block reads efficiently. +// +// DAG nodes (directory structure, file roots) are stored inline in the DB +// and returned without any storage I/O. File-backed leaf blocks are read +// from source files via rclone, with a single-entry reader cache that +// exploits the depth-first traversal pattern: consecutive Get() calls for +// blocks from the same file read from the same held-open stream. +type StorageBlockStore struct { + DBNoContext *gorm.DB + + mu sync.Mutex + handlers map[model.StorageID]*storagesystem.RCloneHandler + active *fileReader +} + +type fileReader struct { + fileID model.FileID + reader io.ReadCloser + offset int64 +} + +func (s *StorageBlockStore) Has(ctx context.Context, c cid.Cid) (bool, error) { + var count int64 + err := s.DBNoContext.WithContext(ctx).Model(&model.CarBlock{}). 
+ Select("cid").Where("cid = ?", model.CID(c)).Count(&count).Error + return count > 0, errors.WithStack(err) +} + +func (s *StorageBlockStore) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) { + var carBlock model.CarBlock + err := s.DBNoContext.WithContext(ctx). + Joins("File.Attachment.Storage"). + Where("car_blocks.cid = ?", model.CID(c)). + First(&carBlock).Error + if errors.Is(err, gorm.ErrRecordNotFound) { + return nil, format.ErrNotFound{Cid: c} + } + if err != nil { + return nil, errors.WithStack(err) + } + + // inline block -- DAG nodes, small files + if carBlock.RawBlock != nil { + return blocks.NewBlockWithCid(carBlock.RawBlock, c) + } + + return s.readFileBlock(ctx, carBlock, c) +} + +func (s *StorageBlockStore) readFileBlock(ctx context.Context, carBlock model.CarBlock, c cid.Cid) (blocks.Block, error) { + s.mu.Lock() + defer s.mu.Unlock() + + storage := *carBlock.File.Attachment.Storage + file := *carBlock.File + blockLen := int64(carBlock.BlockLength()) + + handler, err := s.getHandler(ctx, storage) + if err != nil { + return nil, errors.WithStack(err) + } + + // try to read from held-open stream + if s.active != nil && s.active.fileID == file.ID && s.active.offset == carBlock.FileOffset { + data, err := s.readFromActive(blockLen) + if err != nil { + s.closeActive() + return nil, err + } + return blocks.NewBlockWithCid(data, c) + } + + // different file or non-sequential offset -- close old, open new + s.closeActive() + + reader, obj, err := handler.Read(ctx, file.Path, carBlock.FileOffset, file.Size-carBlock.FileOffset) + if err != nil { + return nil, errors.WithStack(err) + } + + same, explanation := storagesystem.IsSameEntry(ctx, file, obj) + if !same { + reader.Close() + return nil, errors.Wrap(ErrFileHasChanged, explanation) + } + + s.active = &fileReader{ + fileID: file.ID, + reader: reader, + offset: carBlock.FileOffset, + } + + data, err := s.readFromActive(blockLen) + if err != nil { + s.closeActive() + return nil, err + } + return 
blocks.NewBlockWithCid(data, c) +} + +func (s *StorageBlockStore) readFromActive(length int64) ([]byte, error) { + buf := make([]byte, length) + _, err := io.ReadFull(s.active.reader, buf) + if err != nil { + return nil, errors.WithStack(err) + } + s.active.offset += length + return buf, nil +} + +func (s *StorageBlockStore) closeActive() { + if s.active != nil { + s.active.reader.Close() + s.active = nil + } +} + +func (s *StorageBlockStore) getHandler(ctx context.Context, storage model.Storage) (*storagesystem.RCloneHandler, error) { + if s.handlers == nil { + s.handlers = make(map[model.StorageID]*storagesystem.RCloneHandler) + } + if h, ok := s.handlers[storage.ID]; ok { + return h, nil + } + h, err := storagesystem.NewRCloneHandler(ctx, storage) + if err != nil { + return nil, err + } + s.handlers[storage.ID] = h + return h, nil +} + +func (s *StorageBlockStore) GetSize(ctx context.Context, c cid.Cid) (int, error) { + var carBlock model.CarBlock + err := s.DBNoContext.WithContext(ctx).Where("cid = ?", model.CID(c)).First(&carBlock).Error + if err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return 0, format.ErrNotFound{Cid: c} + } + return 0, errors.WithStack(err) + } + return int(carBlock.BlockLength()), nil +} + +func (s *StorageBlockStore) Put(ctx context.Context, block blocks.Block) error { + return util.ErrNotImplemented +} + +func (s *StorageBlockStore) PutMany(ctx context.Context, blks []blocks.Block) error { + return util.ErrNotImplemented +} + +func (s *StorageBlockStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + return nil, util.ErrNotImplemented +} + +func (s *StorageBlockStore) HashOnRead(enabled bool) {} + +func (s *StorageBlockStore) DeleteBlock(ctx context.Context, c cid.Cid) error { + return util.ErrNotImplemented +} + +// Close releases all held resources. +func (s *StorageBlockStore) Close() { + s.mu.Lock() + defer s.mu.Unlock() + s.closeActive() +}